From df588ed488c12f540afedb08042d5a3037897d3a Mon Sep 17 00:00:00 2001
From: hucong <33891520+underfituu@users.noreply.github.com>
Date: Wed, 28 Jan 2026 22:01:01 +0800
Subject: [PATCH] [BugFix] Disable enable_shared_expert_dp by default if
 tensor_parallel_size=1 (#6361)

### What this PR does / why we need it?
Disable enable_shared_expert_dp by default if tensor_parallel_size=1

- vLLM version: v0.14.1
- vLLM main: https://github.com/vllm-project/vllm/commit/dc917cceb877dfd13f98c538c4c96158047d98bd

Signed-off-by: underfituu
---
 vllm_ascend/ascend_config.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/vllm_ascend/ascend_config.py b/vllm_ascend/ascend_config.py
index 83e92e40..c2420a46 100644
--- a/vllm_ascend/ascend_config.py
+++ b/vllm_ascend/ascend_config.py
@@ -61,6 +61,7 @@ class AscendConfig:
         self.enable_shared_expert_dp = (
             additional_config.get("enable_shared_expert_dp", False)
             and vllm_config.parallel_config.enable_expert_parallel
+            and vllm_config.parallel_config.tensor_parallel_size > 1
         )
 
         from vllm_ascend.utils import enable_sp
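
For context, here is a minimal sketch of the gating condition this patch adds, assuming a simplified stand-in for `vllm_config.parallel_config` (the `ParallelConfig` dataclass and `resolve_shared_expert_dp` helper below are illustrative names, not part of vllm_ascend):

```python
# Illustrative sketch of the patched AscendConfig condition, not the
# actual vllm_ascend code. Only the two fields the condition reads are
# modeled on the hypothetical ParallelConfig stand-in.
from dataclasses import dataclass


@dataclass
class ParallelConfig:
    enable_expert_parallel: bool
    tensor_parallel_size: int


def resolve_shared_expert_dp(additional_config: dict,
                             parallel_config: ParallelConfig) -> bool:
    """The user-requested flag is honored only when expert parallelism
    is enabled AND tensor_parallel_size > 1 (the new check)."""
    return (additional_config.get("enable_shared_expert_dp", False)
            and parallel_config.enable_expert_parallel
            and parallel_config.tensor_parallel_size > 1)


# With tensor_parallel_size=1 the flag is now forced off (the bug fix).
assert not resolve_shared_expert_dp(
    {"enable_shared_expert_dp": True},
    ParallelConfig(enable_expert_parallel=True, tensor_parallel_size=1))

# With expert parallelism on and tensor_parallel_size > 1, it is honored.
assert resolve_shared_expert_dp(
    {"enable_shared_expert_dp": True},
    ParallelConfig(enable_expert_parallel=True, tensor_parallel_size=2))
```

Note that the condition short-circuits: if the user never set `enable_shared_expert_dp` in `additional_config`, the parallel-config fields are never consulted and the flag stays `False` by default.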