Commit 0d2b18d

dequant swiglu quant bugfix
1 parent 89dd326 commit 0d2b18d

File tree

1 file changed: +4 -1 lines changed


vllm_ascend/ops/fused_moe/moe_mlp.py

Lines changed: 4 additions & 1 deletion
@@ -127,14 +127,17 @@ def quant_apply_mlp(hidden_states: torch.Tensor,
     if quantized_hidden_states is not None:
         dispose_tensor(quantized_hidden_states)
     # act_fn: swiglu
+    group_diff = torch.diff(group_list, dim=0)
+    new_group = torch.cat([group_list[0].unsqueeze(0), group_diff],
+                          dim=0)
     hidden_states, swiglu_out_scale = torch_npu.npu_dequant_swiglu_quant(
         x=hidden_states,
         weight_scale=w1_scale[0],
         activation_scale=pertoken_scale,
         bias=None,
         quant_scale=None,
         quant_offset=None,
-        group_index=group_list,
+        group_index=new_group,
         activate_left=True,
         quant_mode=1,
     )
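
The added lines rewrite group_list before it is passed as the group_index argument of npu_dequant_swiglu_quant. Assuming group_list carries cumulative per-expert token counts (which the torch.diff/torch.cat pair suggests), the change recovers per-group sizes from the running totals. A minimal sketch of that conversion with made-up values, using plain PyTorch only (torch_npu is not needed to see the effect):

```python
import torch

# Hypothetical cumulative group_list: expert 0 handles 3 tokens, expert 1
# handles 5, expert 2 handles 0, expert 3 handles 2 -> running totals 3, 8, 8, 10.
group_list = torch.tensor([3, 8, 8, 10], dtype=torch.int64)

# Same transformation as the patch: keep the first cumulative value and take
# successive differences to recover per-group token counts.
group_diff = torch.diff(group_list, dim=0)
new_group = torch.cat([group_list[0].unsqueeze(0), group_diff], dim=0)

print(new_group)  # tensor([3, 5, 0, 2])
```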
