@@ -40,15 +40,15 @@ jobs:
4040 apt install git -y
4141
4242 - name : Checkout vllm-project/vllm-ascend repo
43- uses : actions/checkout@v6
43+ uses : actions/checkout@v6.0.0
4444
4545 - name : Install system dependencies
4646 run : |
4747 apt-get -y install `cat packages.txt`
4848 apt-get -y install gcc g++ cmake libnuma-dev
4949
5050 - name : Checkout vllm-project/vllm repo
51- uses : actions/checkout@v6
51+ uses : actions/checkout@v6.0.0
5252 with :
5353 repository : vllm-project/vllm
5454 ref : ${{ inputs.vllm }}
@@ -91,14 +91,13 @@ jobs:
9191 pytest -sv tests/e2e/singlecard/test_completion_with_prompt_embeds.py
9292 pytest -sv tests/e2e/singlecard/test_aclgraph.py
9393 pytest -sv tests/e2e/singlecard/test_aclgraph_mem.py
94- pytest -sv tests/e2e/singlecard/test_ascend_scheduler.py
9594 pytest -sv tests/e2e/singlecard/test_bge_model.py
9695 pytest -sv tests/e2e/singlecard/test_camem.py
97- pytest -sv tests/e2e/singlecard/test_chunked.py
9896 pytest -sv tests/e2e/singlecard/test_embedding.py
9997 # pytest -sv tests/e2e/singlecard/test_embedding_aclgraph.py
10098 pytest -sv tests/e2e/singlecard/test_guided_decoding.py
101- pytest -sv tests/e2e/singlecard/test_ilama_lora.py
99+ # torch 2.8 doesn't work with lora, fix me
100+ #pytest -sv tests/e2e/singlecard/test_ilama_lora.py
102101 pytest -sv tests/e2e/singlecard/test_profile_execute_duration.py
103102 pytest -sv tests/e2e/singlecard/test_quantization.py
104103 pytest -sv tests/e2e/singlecard/test_sampler.py
@@ -134,15 +133,15 @@ jobs:
134133 apt install git -y
135134
136135 - name : Checkout vllm-project/vllm-ascend repo
137- uses : actions/checkout@v6
136+ uses : actions/checkout@v6.0.0
138137
139138 - name : Install system dependencies
140139 run : |
141140 apt-get -y install `cat packages.txt`
142141 apt-get -y install gcc g++ cmake libnuma-dev
143142
144143 - name : Checkout vllm-project/vllm repo
145- uses : actions/checkout@v6
144+ uses : actions/checkout@v6.0.0
146145 with :
147146 repository : vllm-project/vllm
148147 ref : ${{ inputs.vllm }}
@@ -188,7 +187,8 @@ jobs:
188187 pytest -sv tests/e2e/multicard/test_external_launcher.py
189188 pytest -sv tests/e2e/multicard/test_single_request_aclgraph.py
190189 pytest -sv tests/e2e/multicard/test_fused_moe_allgather_ep.py
191- pytest -sv tests/e2e/multicard/test_ilama_lora_tp2.py
190+ # torch 2.8 doesn't work with lora, fix me
191+ #pytest -sv tests/e2e/multicard/test_ilama_lora_tp2.py
192192
193193 # To avoid oom, we need to run the test in a single process.
194194 pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_QwQ
@@ -231,7 +231,7 @@ jobs:
231231 git config --global url."https://gh-proxy.test.osinfra.cn/https://github.com/".insteadOf https://github.com/
232232
233233 - name : Checkout vllm-project/vllm-ascend repo
234- uses : actions/checkout@v6
234+ uses : actions/checkout@v6.0.0
235235 with :
236236 path : ./vllm-ascend
237237
@@ -241,7 +241,7 @@ jobs:
241241 apt-get -y install gcc g++ cmake libnuma-dev
242242
243243 - name : Checkout vllm-project/vllm repo
244- uses : actions/checkout@v6
244+ uses : actions/checkout@v6.0.0
245245 with :
246246 repository : vllm-project/vllm
247247 ref : ${{ inputs.vllm }}
@@ -266,11 +266,10 @@ jobs:
266266 VLLM_WORKER_MULTIPROC_METHOD : spawn
267267 VLLM_USE_MODELSCOPE : True
268268 run : |
269- pytest -sv \
270- tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_multistream_moe \
271- tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_W4A8DYNAMIC
272- # tests/e2e/multicard/test_qwen3_moe.py::test_models_distributed_Qwen3_MOE_TP2_WITH_EP \
273- # tests/e2e/multicard/test_qwen3_moe.py::test_models_distributed_Qwen3_MOE_W8A8_WITH_EP
269+ pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_multistream_moe
270+ pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_W4A8DYNAMIC
271+ # pytest -sv tests/e2e/multicard/test_qwen3_moe.py::test_models_distributed_Qwen3_MOE_TP2_WITH_EP
272+ # pytest -sv tests/e2e/multicard/test_qwen3_moe.py::test_models_distributed_Qwen3_MOE_W8A8_WITH_EP
274273 pytest -sv tests/e2e/multicard/test_data_parallel_tp2.py
275274
276275 - name : Install Ascend toolkit & triton_ascend (for Qwen3-Next-80B-A3B-Instruct)
@@ -287,4 +286,4 @@ jobs:
287286 VLLM_USE_MODELSCOPE : True
288287 run : |
289288 . /usr/local/Ascend/ascend-toolkit/8.3.RC2/bisheng_toolkit/set_env.sh
290- # pytest -sv tests/e2e/multicard/test_qwen3_next.py
289+ pytest -sv tests/e2e/multicard/test_qwen3_next.py
0 commit comments