
Commit 4169433

[0.11.0] Cherry-pick pta upgrade change (vllm-project#3940)

This PR cherry-picks two commits from main to upgrade torch-npu to the 2.7.1 official release.

---------

Signed-off-by: wangxiyuan <[email protected]>
1 parent 397d9fb commit 4169433

16 files changed: +88 -166 lines


CMakeLists.txt

Lines changed: 7 additions & 0 deletions
@@ -20,6 +20,13 @@ set(VLLM_ASCEND_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}")
 
 find_package(Torch REQUIRED)
 
+run_python(TORCH_VERSION
+  "import torch; print(torch.__version__)" "Failed to locate torch path")
+# check torch version is 2.7.1
+if(NOT ${TORCH_VERSION} VERSION_EQUAL "2.7.1")
+  message(FATAL_ERROR "Expected PyTorch version 2.7.1, but found ${TORCH_VERSION}")
+endif()
+
 set(RUN_MODE "npu" CACHE STRING "cpu/sim/npu")
 set(SOC_VERSION ${SOC_VERSION})
 message(STATUS "Detected SOC version: ${SOC_VERSION}")
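
For reference, the new gate can be reproduced outside the build with a short Python check. This is a minimal sketch rather than part of the commit: it assumes the third-party `packaging` library is available and only mirrors what `run_python` probes and what `VERSION_EQUAL "2.7.1"` enforces.

```python
# Minimal sketch, not part of the commit: a Python analogue of the CMake
# gate above. Assumes the third-party `packaging` library is installed.
from packaging.version import Version

import torch

torch_version = torch.__version__  # same probe that run_python() executes

# Compare only the release tuple so local build tags (e.g. "2.7.1+cpu")
# still count as 2.7.1; the CMake comparison may treat such tags differently.
if Version(torch_version).release != (2, 7, 1):
    raise RuntimeError(
        f"Expected PyTorch version 2.7.1, but found {torch_version}")
```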

README.md

Lines changed: 1 addition & 1 deletion
@@ -43,7 +43,7 @@ By using vLLM Ascend plugin, popular open-source models, including Transformer-l
 - Software:
   * Python >= 3.9, < 3.12
   * CANN >= 8.2.rc1 (Ascend HDK version refers to [here](https://www.hiascend.com/document/detail/zh/canncommercial/82RC1/releasenote/releasenote_0000.html))
-  * PyTorch >= 2.7.1, torch-npu >= 2.7.1.dev20250724
+  * PyTorch == 2.7.1, torch-npu == 2.7.1
   * vLLM (the same version as vllm-ascend)
 
 ## Getting Started

README.zh.md

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ The vLLM Ascend plugin (`vllm-ascend`) is a community-maintained plugin that lets vLLM run on Ascend NP
 - Software:
   * Python >= 3.9, < 3.12
   * CANN >= 8.2.rc1 (for the Ascend HDK version, see [here](https://www.hiascend.com/document/detail/zh/canncommercial/82RC1/releasenote/releasenote_0000.html))
-  * PyTorch >= 2.7.1, torch-npu >= 2.7.1.dev20250724
+  * PyTorch == 2.7.1, torch-npu == 2.7.1
   * vLLM (same version as vllm-ascend)
 
 ## Getting Started

docs/source/installation.md

Lines changed: 2 additions & 2 deletions
@@ -13,8 +13,8 @@ This document describes how to install vllm-ascend manually.
 |---------------|----------------------------------|-------------------------------------------|
 | Ascend HDK | Refer to [here](https://www.hiascend.com/document/detail/zh/canncommercial/82RC1/releasenote/releasenote_0000.html) | Required for CANN |
 | CANN | >= 8.2.RC1 | Required for vllm-ascend and torch-npu |
-| torch-npu | >= 2.7.1.dev20250724 | Required for vllm-ascend, No need to install manually, it will be auto installed in below steps |
-| torch | >= 2.7.1 | Required for torch-npu and vllm |
+| torch-npu | == 2.7.1 | Required for vllm-ascend, No need to install manually, it will be auto installed in below steps |
+| torch | == 2.7.1 | Required for torch-npu and vllm |
 
 There are two installation methods:
 - **Using pip**: first prepare env manually or via CANN image, then install `vllm-ascend` using pip.
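
Given the exact pins above, a quick environment sanity check can be sketched as follows. This is a minimal sketch, not part of the commit; the pip distribution names (`torch`, `torch-npu`) are assumptions and may need adjusting for a given environment.

```python
# Minimal sketch, not part of the commit: check that the installed
# distributions match the pins in the table above. The distribution names
# ("torch", "torch-npu") are the pip names assumed here; adjust them if
# the lookup fails in your environment.
from importlib.metadata import PackageNotFoundError, version

EXPECTED = {"torch": "2.7.1", "torch-npu": "2.7.1"}

for dist, expected in EXPECTED.items():
    try:
        installed = version(dist)
    except PackageNotFoundError:
        print(f"{dist}: not installed")
        continue
    # Exact-match check; a local tag such as "2.7.1+cpu" would need extra handling.
    status = "OK" if installed == expected else f"MISMATCH (expected {expected})"
    print(f"{dist}: {installed} {status}")
```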

examples/disaggregated_prefill_v1/mooncake_connector_deployment_guide.md

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 * Software:
   * Python >= 3.9, < 3.12
   * CANN >= 8.2.rc1
-  * PyTorch >= 2.7.1, torch-npu >= 2.7.1.dev20250724
+  * PyTorch == 2.7.1, torch-npu == 2.7.1
   * vLLM (same version as vllm-ascend)
   * mooncake-transfer-engine reference documentation: https://github.com/kvcache-ai/Mooncake/blob/main/doc/zh/ascend_transport.md
 

examples/disaggregated_prefill_v1/mooncake_connector_store_deployment_guide.md

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 * Software:
   * Python >= 3.9, < 3.12
   * CANN >= 8.2.rc1
-  * PyTorch >= 2.7.1, torch-npu >= 2.7.1.dev20250724
+  * PyTorch == 2.7.1, torch-npu == 2.7.1
   * vLLM:main branch
   * vLLM-Ascend:main branch
   * Mooncake:[AscendTransport/Mooncake at pooling-async-memcpy](https://github.com/AscendTransport/Mooncake/tree/pooling-async-memcpy)(Currently available branch code, continuously updated.)

pyproject.toml

Lines changed: 2 additions & 2 deletions
@@ -12,8 +12,8 @@ requires = [
     "scipy",
     "setuptools>=64",
     "setuptools-scm>=8",
-    "torch-npu==2.7.1.dev20250724",
-    "torch>=2.7.1",
+    "torch-npu==2.7.1",
+    "torch==2.7.1",
     "torchvision",
     "wheel",
     "msgpack",

requirements.txt

Lines changed: 4 additions & 4 deletions
@@ -10,7 +10,7 @@ pyyaml
 scipy
 setuptools>=64
 setuptools-scm>=8
-torch>=2.7.1
+torch==2.7.1
 torchvision
 wheel
 opencv-python-headless<=4.11.0.86 # Required to avoid numpy version conflict with vllm
@@ -23,6 +23,6 @@ quart
 numba
 
 # Install torch_npu
---pre
---extra-index-url https://mirrors.huaweicloud.com/ascend/repos/pypi
-torch-npu==2.7.1.dev20250724
+#--pre
+#--extra-index-url https://mirrors.huaweicloud.com/ascend/repos/pypi
+torch-npu==2.7.1

tests/ut/ops/test_layernorm.py

Lines changed: 9 additions & 25 deletions
@@ -7,7 +7,6 @@
 
 from tests.ut.base import PytestBase
 from vllm_ascend.quantization.w8a8 import AscendW8A8LinearMethod
-from vllm_ascend.utils import version_check
 
 
 def mock_rms_norm(x, weight, eps):
@@ -18,15 +17,6 @@ def mock_add_rms_norm(x, residual, weight, eps):
     return 2 * x, None, 2 * residual
 
 
-def mock_add_rms_norm_quant(x, residual, weight, quant_scale, quant_offset,
-                            epsilon):
-    x_out = 2 * x
-    residual_out = 2 * residual
-    x_out_quant = x_out.to(torch.int8)
-    residual_out_quant = residual_out.to(torch.int8)
-    return x_out_quant, None, residual_out_quant
-
-
 def mock_add_rms_norm_quant_with_bias(x, residual, weight, quant_scale,
                                       quant_offset, beta, epsilon):
     x_out = 2 * x
@@ -43,10 +33,8 @@ def context(self, mocker: MockerFixture):
         mocker.patch("torch_npu.npu_rms_norm", side_effect=mock_rms_norm)
         mocker.patch("torch_npu.npu_add_rms_norm",
                      side_effect=mock_add_rms_norm)
-        torch_npu_check = version_check()
-        arnq_side_effect = mock_add_rms_norm_quant_with_bias if torch_npu_check else mock_add_rms_norm_quant
         mocker.patch("torch_npu.npu_add_rms_norm_quant",
-                     side_effect=arnq_side_effect)
+                     side_effect=mock_add_rms_norm_quant_with_bias)
         mocker.patch("torch.ops.vllm.maybe_wait_prefetch_done",
                      side_effect=lambda x: None)
 
@@ -82,8 +70,7 @@ def test_forward_oot_with_quant_fusion(self, mocker: MockerFixture):
 
         mock_model_instance = mocker.MagicMock()
         mock_forward_context.model_instance = mock_model_instance
-        torch_npu_check = version_check()
-        num_hidden_layers = 3 if torch_npu_check else 2
+        num_hidden_layers = 3
         mock_model_instance.model.layers = [
             mocker.MagicMock() for _ in range(num_hidden_layers)
         ]
@@ -136,34 +123,31 @@ def test_forward_oot_with_quant_fusion(self, mocker: MockerFixture):
         assert mock_forward_context.fusion_linear == "gate_up_dense"
         assert mock_forward_context.layer_idx == 1
 
-        if torch_npu_check:
-            mock_forward_context.fusion_linear = "gate_moe"
+        mock_forward_context.fusion_linear = "gate_moe"
         x_out, residual_out = layer.forward_oot(x, residual)
 
-        assert mock_get_forward_context.call_count == 6
-        fusion_linear_expected = "qkv_moe" if torch_npu_check else "qkv_dense"
+        assert mock_get_forward_context.call_count == 5
+        fusion_linear_expected = "qkv_moe"
         assert mock_forward_context.fusion_linear == fusion_linear_expected
         assert mock_forward_context.layer_idx == 2
 
         x_out, residual_out = layer.forward_oot(x, residual)
 
-        assert mock_get_forward_context.call_count == 7
-        fusion_linear_expected = "gate_moe" if torch_npu_check else "qkv_dense"
+        assert mock_get_forward_context.call_count == 6
+        fusion_linear_expected = "gate_moe"
         assert mock_forward_context.fusion_linear == fusion_linear_expected
         assert mock_forward_context.layer_idx == 2
 
-        if not torch_npu_check:
-            return
         # last layer returned directly
         x_out, residual_out = layer.forward_oot(x, residual)
 
-        assert mock_get_forward_context.call_count == 8
+        assert mock_get_forward_context.call_count == 7
         assert mock_forward_context.fusion_linear == "qkv_moe"
         assert mock_forward_context.layer_idx == 3
 
         x_out, residual_out = layer.forward_oot(x, residual)
 
-        assert mock_get_forward_context.call_count == 9
+        assert mock_get_forward_context.call_count == 8
         assert mock_forward_context.fusion_linear == "qkv_moe"
         assert mock_forward_context.layer_idx == 3

tests/ut/torchair/quantization/test_torchair_w8a8_dynamic.py

Lines changed: 6 additions & 7 deletions
@@ -23,9 +23,9 @@ def setUp(self):
     @patch("torch_npu.npu_swiglu")
     @patch("torch_npu.npu_dynamic_quant")
     @patch("torch_npu.npu_moe_finalize_routing")
-    @patch("torch_npu.npu_moe_init_routing")
+    @patch("torch_npu.npu_moe_init_routing_quant")
     def test_torchair_fused_experts_with_all2all(
-            self, mock_moe_init_routing, mock_moe_finalize_routing,
+            self, mock_npu_moe_init_routing_quant, mock_moe_finalize_routing,
             mock_dynamic_quant, mock_swiglu, mock_grouped_matmul,
             mock_moe_re_routing, mock_all_to_all_single):
@@ -38,11 +38,10 @@ def test_torchair_fused_experts_with_all2all(
         placeholder_ones = torch.ones(self.num_tokens, dtype=torch.int32)
         mock_all_to_all_single.side_effect = lambda output, input, *args, **kwargs: output.copy_(
             input)
-        mock_moe_init_routing.return_value = (
-            placeholder_int8,
-            placeholder_ones,
-            placeholder_ones,
-        )
+        mock_npu_moe_init_routing_quant.return_value = (
+            placeholder_int8, placeholder_ones, placeholder_ones,
+            torch.bincount(placeholder_ones, minlength=len(expert_map)),
+            torch.randn(self.num_tokens))
         mock_moe_re_routing.return_value = (placeholder_int8, self.placeholder,
                                             torch.randint(0,
                                                           100,
