Skip to content

Commit 2139b27

Browse files
committed
Refactor ACL graph size adjustment for speculative decoding
Move the logic for adjusting ACL graph capture sizes for speculative decoding from the generic utility module into a dedicated method within the compilation configuration. This change improves code organization and encapsulation by making the compilation configuration responsible for managing its own state. The model runner now triggers this adjustment directly, providing the necessary context.

Signed-off-by: Yizhou Liu <[email protected]>
1 parent ea54388 commit 2139b27

File tree

2 files changed

+12
-31
lines changed

2 files changed

+12
-31
lines changed

vllm_ascend/utils.py

Lines changed: 0 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -571,26 +571,6 @@ def update_aclgraph_sizes(vllm_config: VllmConfig) -> None:
571571
vllm_config.model_config.architectures[0], num_hidden_layers,
572572
len(original_sizes))
573573

574-
# default or defined cudagraph_capture_sizes may not consider num_speculative_tokens>1 scenario
575-
# the maximum size cudagraph_capture_sizes[0] should be greater or equal than
576-
# (num_speculative_tokens+1)*max_num_seqs, otherwise draft model will run in eager mode
577-
if vllm_config.speculative_config is not None and \
578-
vllm_config.speculative_config.num_speculative_tokens > 1:
579-
num_speculative_tokens = vllm_config.speculative_config.num_speculative_tokens
580-
max_num_seqs = vllm_config.scheduler_config.max_num_seqs
581-
original_sizes, compilation_config.cudagraph_capture_sizes = \
582-
compilation_config.cudagraph_capture_sizes, None
583-
assert len(original_sizes) > 0
584-
if original_sizes[0] < (num_speculative_tokens + 1) * max_num_seqs:
585-
enlarged_sizes = [(num_speculative_tokens + 1) * size
586-
for size in original_sizes]
587-
update_cudagraph_capture_sizes(vllm_config, enlarged_sizes)
588-
logger.info(
589-
"Adjusted ACL graphs: %s → %s for speculative decoding",
590-
original_sizes, enlarged_sizes)
591-
else:
592-
compilation_config.cudagraph_capture_sizes = original_sizes
593-
594574

595575
# TODO(wxy): Move to ops module
596576
def dispose_tensor(x: torch.Tensor):

vllm_ascend/worker/model_runner_v1.py

Lines changed: 12 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -3910,6 +3910,16 @@ def initialize_aclgraph_capture(self) -> None:
39103910
"; please try cudagraph_mode=PIECEWISE, "
39113911
"and make sure compilation level is piecewise")
39123912

3913+
if (aclgraph_mode.decode_mode() == CUDAGraphMode.FULL
3914+
and aclgraph_mode.separate_routine()
3915+
and self.uniform_decode_query_len > 1):
3916+
self.compilation_config.adjust_cudagraph_sizes_for_spec_decode(
3917+
self.uniform_decode_query_len,
3918+
self.parallel_config.tensor_parallel_size)
3919+
capture_sizes = self.compilation_config.cudagraph_capture_sizes
3920+
self.aclgraph_batch_sizes = (capture_sizes
3921+
if capture_sizes is not None else [])
3922+
39133923
self.aclgraph_dispatcher.initialize_cudagraph_keys(
39143924
self.compilation_config.cudagraph_mode,
39153925
self.uniform_decode_query_len)
@@ -4005,17 +4015,8 @@ def _capture_model(self):
40054015
x for x in self.aclgraph_batch_sizes if x <= max_num_tokens
40064016
and x >= self.uniform_decode_query_len
40074017
]
4008-
compilation_cases_decode = sorted(decode_cudagraph_batch_sizes)
4009-
# TODO: refactor this when vLLM supports mtp>1
4010-
if not all(x % self.uniform_decode_query_len == 0
4011-
for x in decode_cudagraph_batch_sizes):
4012-
raise ValueError(
4013-
"In the MTP fullgraph scenario, each graph size must be an integer multiple of "
4014-
f"(num_speculative_tokens + 1): {self.uniform_decode_query_len}. "
4015-
f"Please modify the cudagraph_capture_sizes variable to be integer multiple of {self.uniform_decode_query_len}, "
4016-
f"while ensuring the maximum cudagraph_capture_sizes does not exceed max_num_seqs * (num_speculative_tokens + 1): {max_num_tokens}. "
4017-
"For example, with MTP=2 and max_num_seqs=16, we recommend setting cudagraph_capture_sizes to [48]."
4018-
)
4018+
compilation_cases_decode = list(
4019+
reversed(decode_cudagraph_batch_sizes))
40194020
self._capture_aclgraphs(
40204021
compilation_cases=compilation_cases_decode,
40214022
aclgraph_runtime_mode=CUDAGraphMode.FULL,

0 commit comments

Comments (0)