Skip to content

Commit e94384b

Browse files
authored
[Bugfix] Fix broken ViT attention selection for Blackwell device (#30731)
Signed-off-by: Isotr0py <[email protected]>
1 parent b9ff4f2 commit e94384b

File tree

1 file changed

+2
-8
lines changed

1 file changed

+2
-8
lines changed

vllm/model_executor/models/vision.py

Lines changed: 2 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
from transformers import PretrainedConfig
1212

1313
from vllm.attention.backends.registry import AttentionBackendEnum
14-
from vllm.config import VllmConfig, get_current_vllm_config
14+
from vllm.config import VllmConfig
1515
from vllm.distributed import (
1616
get_tensor_model_parallel_rank,
1717
get_tensor_model_parallel_world_size,
@@ -88,16 +88,10 @@ def get_vit_attn_backend(
8888
"""
8989
Get the available attention backend for Vision Transformer.
9090
"""
91-
attn_backend = attn_backend_override
92-
93-
selected_backend = get_current_vllm_config().attention_config.backend
94-
if attn_backend is None:
95-
attn_backend = selected_backend
96-
9791
return current_platform.get_vit_attn_backend(
9892
head_size,
9993
dtype,
100-
backend=attn_backend,
94+
backend=attn_backend_override,
10195
)
10296

10397

0 commit comments

Comments (0)