[Triton][Config] Add muls_add triton kernel and refactor AscendCompilationConfig (#5518)

### What this PR does / why we need it?
Add a `muls_add` Triton kernel together with a matching fusion pass. In
addition, this PR refactors `AscendCompilationConfig` and deletes
`NpugraphExConfig`.
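
The kernel body is not shown in the excerpt below. As a rough sketch, assuming `muls_add` fuses a scalar multiply with an elementwise add (`out = x * scale + y`), a minimal Triton implementation could look like the following; all names and the signature are illustrative, not the PR's actual code:

```python
# Hypothetical sketch only -- the actual kernel in this PR may differ.
import torch
import triton
import triton.language as tl


@triton.jit
def muls_add_kernel(x_ptr, y_ptr, out_ptr, scale, n_elements,
                    BLOCK_SIZE: tl.constexpr):
    pid = tl.program_id(axis=0)
    offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    x = tl.load(x_ptr + offsets, mask=mask)
    y = tl.load(y_ptr + offsets, mask=mask)
    # Fuse the scalar multiply and the add into a single pass over memory --
    # the two-op pattern a fusion pass would replace with this kernel.
    tl.store(out_ptr + offsets, x * scale + y, mask=mask)


def muls_add(x: torch.Tensor, y: torch.Tensor, scale: float) -> torch.Tensor:
    out = torch.empty_like(x)
    n = x.numel()
    grid = (triton.cdiv(n, 1024),)
    muls_add_kernel[grid](x, y, out, scale, n, BLOCK_SIZE=1024)
    return out
```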

### Does this PR introduce _any_ user-facing change?
None

### How was this patch tested?
CI passed with the newly added tests.


- vLLM version: v0.13.0
- vLLM main:
45c1ca1ca1

---------

Signed-off-by: whx-sjtu <2952154980@qq.com>
Author: whx
Date: 2026-03-02 17:54:25 +08:00
Committed by: GitHub
Parent commit: 8547520726
Commit: 16c879cdf7
14 changed files with 290 additions and 98 deletions


@@ -30,7 +30,7 @@ from vllm.compilation.compiler_interface import CompilerInterface
 from vllm.config import VllmConfig
 from vllm.config.utils import Range
-from vllm_ascend.ascend_config import NpugraphExConfig, get_ascend_config
+from vllm_ascend.ascend_config import AscendCompilationConfig, get_ascend_config
 from vllm_ascend.utils import COMPILATION_PASS_KEY
@@ -71,7 +71,7 @@ def npugraph_ex_compile(
     example_inputs: list[Any],
     compiler_config: dict[str, Any],
     vllm_config: VllmConfig,
-    npugraph_ex_config: NpugraphExConfig,
+    ascend_compilation_config: AscendCompilationConfig,
     compile_range: Range,
     key: str | None = None,
 ) -> tuple[Callable | None, Any | None]:
@@ -83,7 +83,7 @@ def npugraph_ex_compile(
     config.mode = "reduce-overhead"
     # execute FX graph in eager mode before graph mode to optimize FX graph.
     config.debug.run_eagerly = True
-    if npugraph_ex_config.enable_static_kernel:
+    if ascend_compilation_config.enable_static_kernel:
         config.experimental_config.aclgraph._aclnn_static_shape_kernel = True
     # According to the cudagraph_capture_size configuration, set the shapes
     # that can trigger the compilation of static kernel. If this configuration is
@@ -117,8 +117,8 @@ class AscendCompiler(CompilerInterface):
name = "AscendCompiler"
def compute_hash(self, vllm_config: VllmConfig) -> str:
npugraph_ex_config = get_ascend_config().npugraph_ex_config
if npugraph_ex_config.enable:
npugraph_ex_enabled = get_ascend_config().ascend_compilation_config.enable_npugraph_ex
if npugraph_ex_enabled:
self.vllm_config = vllm_config
return vllm_config.compute_hash()
@@ -134,11 +134,11 @@ class AscendCompiler(CompilerInterface):
         # see https://github.com/pytorch/pytorch/issues/138980
         graph = copy.deepcopy(graph)
-        npugraph_ex_config = get_ascend_config().npugraph_ex_config
-        if npugraph_ex_config.enable:
+        ascend_compilation_config = get_ascend_config().ascend_compilation_config
+        if ascend_compilation_config.enable_npugraph_ex:
             assert hasattr(self, "vllm_config")
             return npugraph_ex_compile(
-                graph, example_inputs, compiler_config, self.vllm_config, npugraph_ex_config, compile_range, key
+                graph, example_inputs, compiler_config, self.vllm_config, ascend_compilation_config, compile_range, key
             )
         else:
             return fusion_pass_compile(graph, example_inputs, compiler_config, compile_range, key)
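
For context on the refactor: the diff only reveals that `enable_npugraph_ex` and `enable_static_kernel` now live directly on `AscendCompilationConfig`, replacing the old `NpugraphExConfig.enable` and `NpugraphExConfig.enable_static_kernel`. A minimal sketch of the consolidated config under that assumption (the real class almost certainly carries more fields and validation):

```python
# Sketch based only on the attribute names visible in the diff above.
from dataclasses import dataclass


@dataclass
class AscendCompilationConfig:
    # Replaces the old NpugraphExConfig.enable flag.
    enable_npugraph_ex: bool = False
    # Compile static-shape kernels for the captured graph sizes.
    enable_static_kernel: bool = False
```

Flattening the nested `NpugraphExConfig` into `AscendCompilationConfig` means callers like `AscendCompiler.compute_hash` read one config object instead of reaching through two, as the hunks above show.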