
Commit b94836c

grrr spelling mistake
1 parent 9880e78 commit b94836c

File tree

1 file changed: +2 -2 lines changed


vllm/v1/spec_decode/eagle.py

Lines changed: 2 additions & 2 deletions
@@ -657,8 +657,8 @@ def load_model(self, target_model: nn.Module) -> None:
         self.hot_token_ids = load_draft_vocab_pruned(self.vllm_config.speculative_config.draft_vocab_pruned)
         device = next(self.model.model.parameters()).device
         self.hot_token_ids = self.hot_token_ids.to(device)
-        # self.model.model.embed_tokens.weight is the model head
-        self.model.model.embed_tokens.weight.data = self.model.model.embed_tokens.weight.data[self.hot_token_id]
+        # `self.model.model.embed_tokens.weight` is the model head
+        self.model.model.embed_tokens.weight.data = self.model.model.embed_tokens.weight.data[self.hot_token_ids]


     @torch.inference_mode()
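
For context: the fixed line row-indexes the draft model's embedding weight with the pruned ("hot") token ids, so the weight matrix keeps only the rows for the retained vocabulary. The misspelled `self.hot_token_id` refers to an attribute that is never assigned (only `self.hot_token_ids` is), so the old line would fail at runtime. Below is a minimal standalone sketch of the same indexing pattern; the sizes and the example ids are illustrative assumptions, not vLLM's actual values.

import torch
import torch.nn as nn

# Illustrative sizes; the real draft model takes these from its config.
vocab_size, hidden_size = 32_000, 4_096
embed_tokens = nn.Embedding(vocab_size, hidden_size)

# Pruned ("hot") draft vocabulary: a 1-D LongTensor of token ids to keep.
hot_token_ids = torch.tensor([0, 1, 5, 42, 31_999])

# Row-select the embedding matrix so it only covers the hot tokens.
# Indexing a (vocab, hidden) weight with a 1-D id tensor keeps one row per id.
embed_tokens.weight.data = embed_tokens.weight.data[hot_token_ids]

assert embed_tokens.weight.shape == (len(hot_token_ids), hidden_size)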
