Skip to content

Commit 4689528

Browse files
author
wangxiaoxin-sherie
committed
add FULL graph.
Signed-off-by: wangxiaoxin-sherie <[email protected]>
1 parent 10772d9 commit 4689528

File tree

6 files changed

+248
-255
lines changed

6 files changed

+248
-255
lines changed

.github/workflows/_e2e_test.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -175,6 +175,7 @@ jobs:
175175
VLLM_USE_MODELSCOPE: True
176176
if: ${{ inputs.type == 'full' }}
177177
run: |
178+
pytest -sv tests/e2e/multicard/test_full_graph_mode.py
178179
pytest -sv tests/e2e/multicard/test_data_parallel.py
179180
pytest -sv tests/e2e/multicard/test_expert_parallel.py
180181
# pytest -sv tests/e2e/multicard/test_external_launcher.py

tests/e2e/multicard/test_full_graph_mode.py

Lines changed: 43 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@
2929
from tests.e2e.model_utils import check_outputs_equal
3030

3131

32-
def test_models_distributed_Qwen3_MOE_TP2_WITH_FULLGRAPH():
32+
def test_models_distributed_Qwen3_MOE_TP2_WITH_FULL_DECODE_ONLY():
3333
if 'HCCL_OP_EXPANSION_MODE' in os.environ:
3434
del os.environ['HCCL_OP_EXPANSION_MODE']
3535
prompts = [
@@ -70,3 +70,45 @@ def test_models_distributed_Qwen3_MOE_TP2_WITH_FULLGRAPH():
7070
name_0="vllm_eager_outputs",
7171
name_1="vllm_fullgraph_outputs",
7272
)
73+
74+
def test_models_distributed_Qwen3_MOE_TP2_WITH_FULL():
    """Compare TP2 FULL-cudagraph generation against eager-mode generation.

    Runs the same greedy-decoding prompts through the model twice — once
    with ``compilation_config={"cudagraph_mode": "FULL"}`` and once with
    ``enforce_eager=True`` — and asserts the (index, text) outputs match.
    """
    # Drop HCCL_OP_EXPANSION_MODE if set, same as the sibling
    # FULL_DECODE_ONLY test does before launching the runners.
    os.environ.pop('HCCL_OP_EXPANSION_MODE', None)

    prompts = [
        "Hello, my name is", "The president of the United States is",
        "The capital of France is", "The future of AI is"
    ]
    model = "Qwen/Qwen3-30B-A3B"
    # temperature=0.0 -> greedy decoding, so both runs are deterministic
    # and directly comparable.
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)

    # Pass 1: full cudagraph capture (compiled, non-eager) on TP2.
    with VllmRunner(model,
                    max_model_len=1024,
                    tensor_parallel_size=2,
                    enforce_eager=False,
                    compilation_config={"cudagraph_mode":
                                        "FULL"}) as fullgraph_runner:
        vllm_fullgraph_outputs = fullgraph_runner.model.generate(
            prompts, sampling_params)

    # Pass 2: eager execution as the reference implementation.
    with VllmRunner(
            model,
            max_model_len=1024,
            enforce_eager=True,
    ) as eager_runner:
        vllm_eager_outputs = eager_runner.model.generate(
            prompts, sampling_params)

    # Reduce each RequestOutput to its top completion's (index, text) pair.
    vllm_fullgraph_outputs_list = [(req.outputs[0].index, req.outputs[0].text)
                                   for req in vllm_fullgraph_outputs]
    vllm_eager_outputs_list = [(req.outputs[0].index, req.outputs[0].text)
                               for req in vllm_eager_outputs]

    check_outputs_equal(
        outputs_0_lst=vllm_eager_outputs_list,
        outputs_1_lst=vllm_fullgraph_outputs_list,
        name_0="vllm_eager_outputs",
        name_1="vllm_fullgraph_outputs",
    )

0 commit comments

Comments
 (0)