Commit ec9ec78

[TEST]Add initial prefix cache case for nightly test (#3709)
### What this PR does / why we need it?
This PR adds the initial prefix cache case to the nightly test suite for Qwen3-32B-int8 on A3; the case needs to run daily.

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
By running the test.

- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0

Signed-off-by: jiangyunfan1 <[email protected]>
1 parent 6be321b commit ec9ec78

2 files changed: +119 -5 lines
Lines changed: 104 additions & 0 deletions
@@ -0,0 +1,104 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
import json

import pytest
from vllm.utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import get_TTFT, run_aisbench_cases

MODELS = [
    "vllm-ascend/Qwen3-32B-W8A8",
]

aisbench_warm_up = [{
    "case_type": "performance",
    "dataset_path": "vllm-ascend/GSM8K-in1024-bs210",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 210,
    "max_out_len": 2,
    "batch_size": 1000,
    "baseline": 0,
    "threshold": 0.97
}]

aisbench_cases0 = [{
    "case_type": "performance",
    "dataset_path": "vllm-ascend/prefix0-in3500-bs210",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 210,
    "max_out_len": 1500,
    "batch_size": 48,
    "baseline": 1,
    "threshold": 0.97
}]

aisbench_cases75 = [{
    "case_type": "performance",
    "dataset_path": "vllm-ascend/prefix75-in3500-bs210",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 210,
    "max_out_len": 1500,
    "batch_size": 48,
    "baseline": 1,
    "threshold": 0.97
}]


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
async def test_models(model: str) -> None:
    port = get_open_port()
    env_dict = {"TASK_QUEUE_ENABLE": "1", "HCCL_OP_EXPANSION_MODE": "AIV"}
    additional_config = {
        "ascend_scheduler_config": {
            "enabled": False
        },
        "enable_weight_nz_layout": True
    }
    server_args = [
        "--quantization", "ascend", "--reasoning-parser", "qwen3",
        "--tensor-parallel-size", "4", "--port",
        str(port), "--max-model-len", "8192", "--max-num-batched-tokens",
        "8192", "--max-num-seqs", "256", "--trust-remote-code",
        "--gpu-memory-utilization", "0.9", "--additional-config",
        json.dumps(additional_config)
    ]
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False):
        run_aisbench_cases(model, port, aisbench_warm_up)
        result = run_aisbench_cases(model, port, aisbench_cases0)
        TTFT0 = get_TTFT(result)
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False):
        run_aisbench_cases(model, port, aisbench_warm_up)
        result = run_aisbench_cases(model, port, aisbench_cases75)
        TTFT75 = get_TTFT(result)
    assert TTFT75 < 0.4 * TTFT0, f"The TTFT for prefix75 {TTFT75} is not less than 0.4*TTFT for prefix0 {TTFT0}."
    print(
        f"The TTFT for prefix75 {TTFT75} is less than 0.4*TTFT for prefix0 {TTFT0}."
    )
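
For readers skimming the diff: the test launches a fresh server for each dataset run, warms it up, and then compares the average TTFT of the prefix0 dataset (no shared prompt prefix) against the prefix75 dataset (75% of each ~3500-token prompt shared, per the dataset names). The acceptance criterion is that the cached-prefix run must be at least 2.5x faster to first token. A standalone sketch of that check, with the helper name and the numeric values being illustrative rather than part of this PR:

def check_prefix_cache_speedup(ttft_prefix0: float,
                               ttft_prefix75: float,
                               max_ratio: float = 0.4) -> None:
    """Require the 75%-shared-prefix TTFT to be well below the 0%-prefix TTFT.

    When prefix caching is effective, the shared portion of each prompt is
    served from cached KV blocks, so time-to-first-token drops sharply.
    """
    assert ttft_prefix75 < max_ratio * ttft_prefix0, (
        f"TTFT with 75% shared prefix ({ttft_prefix75}) is not below "
        f"{max_ratio} * TTFT with 0% shared prefix ({ttft_prefix0})")


# Illustrative numbers only: a healthy cache hit should cut TTFT well below 40%.
check_prefix_cache_speedup(ttft_prefix0=800.0, ttft_prefix75=250.0)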

tools/aisbench.py

Lines changed: 15 additions & 5 deletions
@@ -205,15 +205,16 @@ def _get_result_performance(self):
                                         f"{dataset_type}dataset.csv")
         result_json_file = os.path.join(result_dir,
                                         f"{dataset_type}dataset.json")
-        self.result_csv = pd.read_csv(result_csv_file)
+        self.result_csv = pd.read_csv(result_csv_file, index_col=0)
         print("Getting performance results from file: ", result_json_file)
         with open(result_json_file, 'r', encoding='utf-8') as f:
             self.result_json = json.load(f)
+        self.result = [self.result_csv, self.result_json]
 
     def _get_result_accuracy(self):
         acc_file = re.search(r'write csv to (.*)', self.result_line).group(1)
         df = pd.read_csv(acc_file)
-        return float(df.loc[0][-1])
+        self.result = float(df.loc[0][-1])
 
     def _performance_verify(self):
         self._get_result_performance()
@@ -224,21 +225,30 @@ def _performance_verify(self):
         ) >= self.threshold * self.baseline, f"Performance verification failed. The current Output Token Throughput is {output_throughput} token/s, which is not greater than or equal to {self.threshold} * baseline {self.baseline}."
 
     def _accuracy_verify(self):
-        acc_value = self._get_result_accuracy()
+        self._get_result_accuracy()
+        acc_value = self.result
         assert self.baseline - self.threshold <= acc_value <= self.baseline + self.threshold, f"Accuracy verification failed. The accuracy of {self.dataset_path} is {acc_value}, which is not within {self.threshold} relative to baseline {self.baseline}."
 
 
 def run_aisbench_cases(model, port, aisbench_cases):
+    aisbench_results = []
     aisbench_errors = []
     for aisbench_case in aisbench_cases:
         try:
-            with AisbenchRunner(model, port, aisbench_case):
-                pass
+            with AisbenchRunner(model, port, aisbench_case) as aisbench:
+                aisbench_results.append(aisbench.result)
         except Exception as e:
+            aisbench_results.append("")
             aisbench_errors.append([aisbench_case, e])
             print(e)
     for failed_case, error_info in aisbench_errors:
         print(
             f"The following aisbench case failed: {failed_case}, reason is {error_info}."
         )
     assert not aisbench_errors, "some aisbench cases failed, info were shown above."
+    return aisbench_results
+
+
+def get_TTFT(result):
+    TTFT = result[0][0].loc["TTFT", "Average"][:-3]
+    return float(TTFT)

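As context for the new get_TTFT helper: run_aisbench_cases now returns one entry per case, and for a performance case that entry is the [result_csv, result_json] pair stored by _get_result_performance, with the CSV indexed by its first column (the index_col=0 change) so metrics can be looked up by row name. Below is a minimal mock of that structure; the "TPOT" row, the numeric values, and the trailing " ms" unit stripped by [:-3] are assumptions for illustration, not taken from the actual aisbench output:

import pandas as pd

# Mock of one performance-case result under the assumptions above:
# a list with a single [csv_dataframe, json_dict] pair.
mock_csv = pd.DataFrame({"Average": ["812.34 ms", "45.67 ms"]},
                        index=["TTFT", "TPOT"])
mock_result = [[mock_csv, {"details": "..."}]]

# Mirrors get_TTFT(mock_result): take the first case's CSV, read the average
# TTFT cell, drop the assumed 3-character unit suffix, and convert to float.
ttft = float(mock_result[0][0].loc["TTFT", "Average"][:-3])
print(ttft)  # 812.34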