From f58e110afe65653c50f909f701e8ce7fec20054a Mon Sep 17 00:00:00 2001
From: aipaes <82140963+aipaes@users.noreply.github.com>
Date: Mon, 19 Jan 2026 21:19:25 +0800
Subject: [PATCH] 【feat】switch for fusion ops gmmswigluquant (#5992)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### What this PR does / why we need it?
Add an additional config parameter to control whether the gmmswigluquant
fusion operator is enabled; it is enabled (True) by default. When enabled
on a small number of NPUs, the gmmswigluquant fused operator can cause some
performance degradation, so this switch makes it possible to turn the fusion
off, as shown in the sketch below.
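A minimal sketch of disabling the switch from offline inference, assuming the
usual vllm-ascend `additional_config` plumbing; the model path below is a
placeholder, not part of this PR:

```python
from vllm import LLM

llm = LLM(
    model="/path/to/GLM-4.6-w8a8",  # placeholder model path
    additional_config={
        "ascend_fusion_config": {
            # Disable the gmmswigluquant fused operator (defaults to True).
            "fusion_ops_gmmswigluquant": False,
        },
    },
)
```

The same dict should also be passable as a JSON string through
`--additional-config` when serving online.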
+ """ + + def __init__(self, fusion_ops_gmmswigluquant: bool = True, **kwargs): + """ + Initialize the configuration. + + Args: + fusion_ops_gmmswigluquant (bool): Whether to use a fused operator gmmswigluquant. + When set to True, the system will use a fused operator gmmswigluquant. + Default: True + **kwargs: Additional optional parameters for forward compatibility and configuration extension. + """ + self.fusion_ops_gmmswigluquant = fusion_ops_gmmswigluquant + + class XliteGraphConfig: """ Configuration Object for xlite_graph_config from additional_config diff --git a/vllm_ascend/ops/fused_moe/moe_comm_method.py b/vllm_ascend/ops/fused_moe/moe_comm_method.py index 829f8f5f..41cad9af 100644 --- a/vllm_ascend/ops/fused_moe/moe_comm_method.py +++ b/vllm_ascend/ops/fused_moe/moe_comm_method.py @@ -48,6 +48,12 @@ def setup_moe_comm_method(moe_config): _MoECommMethods[MoECommType.FUSED_MC2] = FusedMC2CommImpl(moe_config) +def set_gmmswigluquant_method(): + from vllm_ascend.ascend_config import get_ascend_config + ascend_config = get_ascend_config() + return ascend_config.ascend_fusion_config.fusion_ops_gmmswigluquant + + @dataclass class FusedExpertsResult: routed_out: torch.Tensor @@ -69,6 +75,7 @@ class MoECommMethod(ABC): self.token_dispatcher = self._get_token_dispatcher() self.prepare_finalize = self._get_prepare_finalize() + self.use_fusion_ops = set_gmmswigluquant_method() def prepare( self, @@ -159,7 +166,7 @@ class MoECommMethod(ABC): w2_offset=w2_offset, topk_scales=dispatch_results.topk_scales, with_quant=use_int8_w8a8 or use_int4_w4a8 or use_int4_w4a16, - fusion=use_int8_w8a8, + fusion=use_int8_w8a8 and self.use_fusion_ops, need_trans=need_trans, dynamic_eplb=dynamic_eplb) diff --git a/vllm_ascend/platform.py b/vllm_ascend/platform.py index 4e2a35c9..b9ac4404 100644 --- a/vllm_ascend/platform.py +++ b/vllm_ascend/platform.py @@ -206,6 +206,13 @@ class NPUPlatform(Platform): elif model_config and hasattr(model_config.hf_text_config, "index_topk"): vllm_config.cache_config.cache_dtype = str(model_config.dtype).replace("torch.", "") + + ascend_fusion_config = ascend_config.ascend_fusion_config + if ascend_fusion_config: + vllm_config.additional_config.setdefault("ascend_fusion_config", {}).update( + vars(ascend_fusion_config) if not isinstance(ascend_fusion_config, dict) else ascend_fusion_config + ) + if model_config is None: logger.warning("Model config is missing. This may indicate that we are running a test case") enforce_eager = False