We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 20568f7 commit 98a4d21
vllm_ascend/ops/__init__.py
@@ -16,11 +16,15 @@
16
#
17
18
import torch
19
+from vllm.triton_utils import HAS_TRITON
20
21
import vllm_ascend.ops.fused_moe.fused_moe # noqa
22
import vllm_ascend.ops.layernorm # noqa
23
import vllm_ascend.ops.register_custom_ops # noqa
-import vllm_ascend.ops.triton.linearnorm.split_qkv_rmsnorm_rope # noqa
24
+
25
+if HAS_TRITON:
26
+ import vllm_ascend.ops.triton.linearnorm.split_qkv_rmsnorm_rope # noqa
27
28
import vllm_ascend.ops.vocab_parallel_embedding # noqa
29
from vllm_ascend.ops.activation import AscendQuickGELU, AscendSiluAndMul
30
from vllm_ascend.ops.rotary_embedding import (
0 commit comments