
Commit 06a6693

Remove mindie_turbo (#4896)
mindie_turbo has been out of date for a long time. This PR removes the related registration method.

- vLLM version: v0.12.0
- vLLM main: vllm-project/vllm@ad32e3e

Signed-off-by: wangxiyuan <[email protected]>
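For context, the deleted helper followed the usual optional-dependency pattern: probe for the module with importlib, import it if it is installed, and silently ignore any failure. Below is a minimal sketch of the removed helper and its former call site, reconstructed from the diffs in this commit; it is illustrative rather than the exact file contents.

import importlib
import importlib.util
import logging

logger = logging.getLogger(__name__)


def try_register_lib(lib_name: str, lib_info: str = ""):
    # Probe for the module; import it only if it is installed.
    try:
        module_spec = importlib.util.find_spec(lib_name)
        if module_spec is not None:
            importlib.import_module(lib_name)
            if lib_info:
                logger.info(lib_info)
    except Exception:
        # Best-effort hook: a missing or broken plugin must never
        # break worker startup.
        pass


# The NPU worker previously invoked the hook like this during __init__:
try_register_lib(
    "mindie_turbo",
    "MindIE Turbo is installed. vLLM inference will be accelerated with MindIE Turbo."
)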
1 parent b89763f commit 06a6693

4 files changed (+1 -53 lines changed)

tests/ut/test_utils.py

Lines changed: 0 additions & 21 deletions
@@ -112,27 +112,6 @@ def test_aligned_16(self):
         output_tensor = utils.aligned_16(input_tensor)
         self.assertEqual(output_tensor.shape[0], 32)
 
-    @mock.patch('importlib.util.find_spec')
-    @mock.patch('importlib.import_module')
-    def test_try_register_lib(self, mock_import_module, mock_find_spec):
-        # import OK
-        mock_find_spec.return_value = mock.MagicMock()
-        mock_import_module.return_value = mock.MagicMock()
-        lib_name = "existing_lib"
-        lib_info = "Library found and imported successfully"
-        utils.try_register_lib(lib_name, lib_info)
-
-        # Can't find lib
-        mock_find_spec.return_value = None
-        lib_name = "non_existing_lib"
-        utils.try_register_lib(lib_name)
-
-        # import error
-        mock_find_spec.return_value = mock.MagicMock()
-        mock_import_module.side_effect = ImportError("import error")
-        lib_name = "error_lib"
-        utils.try_register_lib(lib_name)
-
     def test_enable_custom_op(self):
         result = utils.enable_custom_op()
         self.assertTrue(result)
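For reference, the deleted test walked the helper through its three paths: module found, module missing, and import failure. A self-contained sketch of the same checks, using a local stand-in for the removed vllm_ascend.utils.try_register_lib; the extra assertions beyond the original calls are illustrative additions.

import importlib
import importlib.util
import unittest
from unittest import mock


def try_register_lib(lib_name, lib_info=""):
    # Local stand-in for the removed vllm_ascend.utils.try_register_lib.
    try:
        if importlib.util.find_spec(lib_name) is not None:
            importlib.import_module(lib_name)
    except Exception:
        pass


class TestTryRegisterLib(unittest.TestCase):

    @mock.patch("importlib.import_module")
    @mock.patch("importlib.util.find_spec")
    def test_try_register_lib(self, mock_find_spec, mock_import_module):
        # Module found and imported successfully.
        mock_find_spec.return_value = mock.MagicMock()
        mock_import_module.return_value = mock.MagicMock()
        try_register_lib("existing_lib", "Library found and imported successfully")
        mock_import_module.assert_called_with("existing_lib")

        # Module not installed: import_module is not called a second time.
        mock_find_spec.return_value = None
        try_register_lib("non_existing_lib")
        mock_import_module.assert_called_once()

        # Import raises: the helper swallows the error, nothing propagates.
        mock_find_spec.return_value = mock.MagicMock()
        mock_import_module.side_effect = ImportError("import error")
        try_register_lib("error_lib")


if __name__ == "__main__":
    unittest.main()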

tests/ut/worker/test_worker_v1.py

Lines changed: 0 additions & 12 deletions
@@ -52,14 +52,12 @@ def setUp(self):
     @patch("vllm_ascend.worker.worker_v1.get_ascend_config")
     @patch("vllm_ascend.worker.worker_v1.init_ascend_config")
     @patch("vllm_ascend.worker.worker_v1.check_ascend_device_type")
-    @patch("vllm_ascend.worker.worker_v1.try_register_lib")
     @patch(init_cached_hf_modules_path)
     @patch("vllm_ascend.worker.worker_v1.NPUWorker._init_profiler")
     def test_init_npu_worker_normal_case(
         self,
         mock_init_profiler,
         mock_init_cached_hf_modules,
-        mock_try_register_lib,
         mock_check_ascend_device_type,
         mock_init_ascend_config,
         mock_get_ascend_config,
@@ -94,12 +92,6 @@ def test_init_npu_worker_normal_case(
         mock_init_ascend_config.assert_called_once_with(self.vllm_config_mock)
         mock_check_ascend_device_type.assert_called_once()
 
-        # Verify try_register_lib call
-        mock_try_register_lib.assert_called_once_with(
-            "mindie_turbo",
-            "MindIE Turbo is installed. vLLM inference will be accelerated with MindIE Turbo.",
-        )
-
         # Verify cache_dtype setting
         self.assertEqual(worker.cache_dtype, torch.float16)
         mock_init_profiler.assert_called_once()
@@ -114,14 +106,12 @@ def test_init_npu_worker_normal_case(
     @patch("vllm_ascend.worker.worker_v1.get_ascend_config")
     @patch("vllm_ascend.worker.worker_v1.init_ascend_config")
     @patch("vllm_ascend.worker.worker_v1.check_ascend_device_type")
-    @patch("vllm_ascend.worker.worker_v1.try_register_lib")
     @patch(init_cached_hf_modules_path)
     @patch("vllm_ascend.worker.worker_v1.NPUWorker._init_profiler")
     def test_init_npu_worker_with_trust_remote_code(
         self,
         mock_init_profiler,
         mock_init_cached_hf_modules,
-        mock_try_register_lib,
         mock_check_ascend_device_type,
         mock_init_ascend_config,
         mock_get_ascend_config,
@@ -159,14 +149,12 @@ def test_init_npu_worker_with_trust_remote_code(
     @patch("vllm_ascend.worker.worker_v1.get_ascend_config")
     @patch("vllm_ascend.worker.worker_v1.init_ascend_config")
     @patch("vllm_ascend.worker.worker_v1.check_ascend_device_type")
-    @patch("vllm_ascend.worker.worker_v1.try_register_lib")
     @patch(init_cached_hf_modules_path)
     @patch("vllm_ascend.worker.worker_v1.NPUWorker._init_profiler")
     def test_init_npu_worker_with_custom_cache_dtype(
         self,
         mock_init_profiler,
         mock_init_cached_hf_modules,
-        mock_try_register_lib,
         mock_check_ascend_device_type,
         mock_init_ascend_config,
         mock_get_ascend_config,

vllm_ascend/utils.py

Lines changed: 0 additions & 13 deletions
@@ -219,19 +219,6 @@ def aligned_16(tensor: torch.Tensor):
     return new_tensor
 
 
-def try_register_lib(lib_name: str, lib_info: str = ""):
-    import importlib
-    import importlib.util
-    try:
-        module_spec = importlib.util.find_spec(lib_name)
-        if module_spec is not None:
-            importlib.import_module(lib_name)
-            if lib_info:
-                logger.info(lib_info)
-    except Exception:
-        pass
-
-
 def enable_custom_op():
     """
     Enable lazy init for vllm_ascend_C to avoid early initialization of CANN's RTS component.

vllm_ascend/worker/worker_v1.py

Lines changed: 1 addition & 7 deletions
@@ -53,8 +53,7 @@
 from vllm_ascend.ops.triton.triton_utils import init_device_properties_triton
 from vllm_ascend.platform import NPUPlatform
 from vllm_ascend.utils import (check_ascend_device_type, enable_sp,
-                               is_enable_nz, register_ascend_customop,
-                               try_register_lib)
+                               is_enable_nz, register_ascend_customop)
 from vllm_ascend.worker.model_runner_v1 import NPUModelRunner
 
 torch._dynamo.trace_rules.clear_lru_cache()  # noqa: E402
@@ -111,11 +110,6 @@ def __init__(
         except Exception:
             logger.info("Skip binding cpu.")
 
-        # Try to import mindie_turbo to accelerate vLLM inference.
-        try_register_lib(
-            "mindie_turbo",
-            "MindIE Turbo is installed. vLLM inference will be accelerated with MindIE Turbo."
-        )
         if self.cache_config.cache_dtype == "auto":
             self.cache_dtype = self.model_config.dtype
         else:
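For deployments that still rely on mindie_turbo, the removed hook amounted to a guarded import at worker startup. A hypothetical equivalent in user code, assuming the package (if installed) applies its acceleration when imported, which is how the old hook treated it:

try:
    import mindie_turbo  # noqa: F401
except ImportError:
    # Package not installed; continue without MindIE Turbo.
    pass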
