Commit f557173

Upgrade vllm commit hash to 1216
Signed-off-by: zxwang <[email protected]>
1 parent 470ef1a

File tree: 1 file changed (+3, -3)

vllm_ascend/platform.py

Lines changed: 3 additions & 3 deletions
@@ -20,16 +20,16 @@
 from typing import TYPE_CHECKING, Optional, Tuple

 import torch
-if not vllm_version_is('0.12.0'):
-    from vllm.attention.selector import AttentionSelectorConfig
 from vllm.logger import logger
 from vllm.platforms import Platform, PlatformEnum

 # todo: please remove it when solve cuda hard code in vllm
 os.environ["VLLM_DISABLE_SHARED_EXPERTS_STREAM"] = "1"

 from vllm_ascend.ascend_config import init_ascend_config
-from vllm_ascend.utils import refresh_block_size
+from vllm_ascend.utils import refresh_block_size, vllm_version_is
+if not vllm_version_is('0.12.0'):
+    from vllm.attention.selector import AttentionSelectorConfig

 # isort: off
 from vllm_ascend.utils import (ASCEND_QUANTIZATION_METHOD,
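
For reference, the guarded-import pattern in the hunk above keeps vllm_ascend/platform.py importable across vLLM builds: vllm_version_is is now imported from vllm_ascend.utils alongside refresh_block_size, and AttentionSelectorConfig is imported only when the installed vLLM is not the pinned 0.12.0 release, so the guard runs after the helper is in scope. Below is a minimal sketch of that pattern; the vllm_version_is shown here is a hypothetical stand-in built on importlib.metadata, not the actual implementation in vllm_ascend.utils, which may differ.

# Minimal sketch of the version-gated import pattern used in the hunk above.
# `vllm_version_is` here is a hypothetical stand-in; the real helper lives in
# vllm_ascend.utils and may be implemented differently.
from importlib.metadata import PackageNotFoundError, version


def vllm_version_is(target: str) -> bool:
    """Return True if the installed vllm distribution reports exactly `target`."""
    try:
        return version("vllm") == target
    except PackageNotFoundError:
        return False


# Symbols absent from the pinned 0.12.0 release are imported only on other
# (newer) vLLM builds, so the module still imports cleanly on 0.12.0.
# This branch assumes vLLM itself is installed.
if not vllm_version_is('0.12.0'):
    from vllm.attention.selector import AttentionSelectorConfig  # noqa: F401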
