diff --git a/vllm_ascend/ops/fused_moe.py b/vllm_ascend/ops/fused_moe.py
index 533c20b..97489f9 100644
--- a/vllm_ascend/ops/fused_moe.py
+++ b/vllm_ascend/ops/fused_moe.py
@@ -295,6 +295,7 @@ class AscendFusedMoE(FusedMoE):
             in_dtype=params_dtype,
         )
         self.moe_config = moe
+        # TODO: The self.moe_config.tp_size here is not correct, fixme soon
 
         if quant_config is None:
             self.quant_method = AscendUnquantizedFusedMoEMethod(moe)
diff --git a/vllm_ascend/platform.py b/vllm_ascend/platform.py
index dbfe1dc..1f12c59 100644
--- a/vllm_ascend/platform.py
+++ b/vllm_ascend/platform.py
@@ -16,6 +16,7 @@
 #
 
 import gc
+import os
 from datetime import timedelta
 from typing import TYPE_CHECKING, Optional, Tuple
 
@@ -260,6 +261,8 @@ class NPUPlatform(Platform):
             compilation_config.level = CompilationLevel.NO_COMPILATION
 
         if parallel_config and parallel_config.worker_cls == "auto":
+            # TODO: this is a tricky way to disable `use_sequence_parallel_moe` in vllm.
+            os.environ["VLLM_ALL2ALL_BACKEND"] = "flashinfer_all2allv"
            if ascend_config.torchair_graph_config.enabled or ascend_config.enable_shared_expert_dp:
                 parallel_config.worker_cls = "vllm_ascend.torchair.torchair_worker.NPUTorchairWorker"
             else: