3 changes: 3 additions & 0 deletions .github/workflows/vllm_ascend_test_nightly_a3.yaml
@@ -68,6 +68,9 @@ jobs:
          - name: deepseek-r1-0528-w8a8-prefix-cache
            os: linux-aarch64-a3-16
            tests: tests/e2e/nightly/features/test_prefix_cache_deepseek_r1_0528_w8a8.py
          - name: qwq-32b-a3
            os: linux-aarch64-a3-4
            tests: tests/e2e/nightly/models/test_qwq_32b.py
    uses: ./.github/workflows/_e2e_nightly_single_node.yaml
    with:
      vllm: v0.11.0
120 changes: 120 additions & 0 deletions tests/e2e/nightly/models/test_qwq_32b.py
@@ -0,0 +1,120 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any

import openai
import pytest
from vllm.utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases
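
# Nightly end-to-end check: spin up a vLLM OpenAI-compatible server for
# Qwen/QwQ-32B with tensor parallelism 4, verify a basic completion, then run
# gsm8k accuracy and performance cases through aisbench. Two serving modes are
# covered: "aclgraph" (FULL_DECODE_ONLY graph capture) and "single" (eager
# execution with the Ascend scheduler).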

MODELS = [
    "Qwen/QwQ-32B",
]

MODES = [
    "aclgraph",
    "single",
]

TENSOR_PARALLELS = [4]

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}

aisbench_cases = [{
    "case_type": "accuracy",
    "dataset_path": "vllm-ascend/gsm8k-lite",
    "request_conf": "vllm_api_general_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
    "max_out_len": 32768,
    "batch_size": 32,
    "baseline": 95,
    "threshold": 5
}, {
    "case_type": "performance",
    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 176,
    "max_out_len": 1500,
    "batch_size": 44,
    "baseline": 1,
    "threshold": 0.97
}]


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("mode", MODES)
@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
async def test_models(model: str, mode: str, tp_size: int) -> None:
    port = get_open_port()
    env_dict = {
        "TASK_QUEUE_ENABLE": "1",
        "OMP_PROC_BIND": "false",
        "VLLM_USE_V1": "1",
        "HCCL_OP_EXPANSION_MODE": "AIV",
        "VLLM_ASCEND_ENABLE_FLASHCOMM": "1",
        "VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE": "1",
        "VLLM_ASCEND_ENABLE_DEBSE_OPTIMIZE": "1",
        "VLLM_ASCEND_ENABLE_PREFETCH": "1"
    }
    server_args = [
        "--tensor-parallel-size",
        str(tp_size), "--port",
        str(port), "--max-model-len", "36864", "--max-num-batched-tokens",
        "36864", "--block-size", "128", "--trust-remote-code",
        "--gpu-memory-utilization", "0.9", "--compilation_config",
        '{"cudagraph_mode":"FULL_DECODE_ONLY", "cudagraph_capture_sizes": [1, 8, 24, 48, 60]}',
        "--reasoning-parser", "deepseek_r1", "--distributed_executor_backend",
        "mp"
    ]
    if mode == "single":
        server_args.remove("--compilation_config")
        server_args.remove(
            '{"cudagraph_mode":"FULL_DECODE_ONLY", "cudagraph_capture_sizes": [1, 8, 24, 48, 60]}'
        )
        server_args.append("--additional-config")
        server_args.append('{"ascend_scheduler_config":{"enabled":true}}')
        server_args.append("--enforce-eager")
Comment on lines +83 to +100
high

The current method of constructing server_args by defining a default list and then modifying it with list.remove() is brittle and can lead to runtime errors. If the initial list is changed, the remove() calls might fail with a ValueError. Additionally, the long JSON string for compilation_config is duplicated, making the code harder to maintain.

It's better to build the argument list conditionally from common and mode-specific parts. This approach is more robust, readable, and avoids duplicating configuration strings.

    server_args = [
        "--tensor-parallel-size",
        str(tp_size), "--port",
        str(port), "--max-model-len", "36864", "--max-num-batched-tokens",
        "36864", "--block-size", "128", "--trust-remote-code",
        "--gpu-memory-utilization", "0.9",
    ]
    if mode == "single":
        server_args.extend([
            "--additional-config",
            '{"ascend_scheduler_config":{"enabled":true}}',
            "--enforce-eager",
        ])
    else:  # aclgraph
        server_args.extend([
            "--compilation_config",
            '{"cudagraph_mode":"FULL_DECODE_ONLY", "cudagraph_capture_sizes": [1, 8, 24, 48, 60]}',
        ])
    server_args.extend([
        "--reasoning-parser", "deepseek_r1", "--distributed_executor_backend",
        "mp"
    ])

    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False) as server:
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
        if mode == "single":
            return
        # aisbench test
        run_aisbench_cases(model, port, aisbench_cases)
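
A minimal local-run sketch, assuming the reusable _e2e_nightly_single_node.yaml workflow simply points pytest at the tests path above and that pytest-asyncio is available for the async test:

    pytest -s tests/e2e/nightly/models/test_qwq_32b.py -k aclgraph

The -k filter restricts the run to the "aclgraph" parametrization; dropping it also exercises the "single" (eager) mode.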