[npugraph_ex] enable npugraph_ex by default (#6664)
### What this PR does / why we need it?
This pull request enables the `npugraph_ex` backend by default to improve performance on Ascend NPUs, as proposed in the [RFC](https://github.com/vllm-project/vllm-ascend/issues/6214).

### Does this PR introduce _any_ user-facing change?
Yes. `npugraph_ex` is now enabled by default. Users can disable it by setting `enable: false` in the `npugraph_ex_config` section of the `additional_config`.

### How was this patch tested?
CI passed. The changes are covered by existing and new E2E tests (`test_aclgraph_accuracy.py`) and unit tests (`test_ascend_config.py`) that have been updated to reflect the new default behavior. The tests verify correctness and consistency with `npugraph_ex` enabled and disabled, as well as with the new static kernel option.

Signed-off-by: huyuanquan1 <huyuanquan1@huawei.com>
Co-authored-by: huyuanquan1 <huyuanquan1@huawei.com>
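For reference, a minimal opt-out sketch (not part of this PR's diff): it assumes the standard vLLM offline `LLM` entry point and a placeholder model name; the `npugraph_ex_config` keys follow the description above.

```python
# Minimal sketch: opting out of the new npugraph_ex default via
# additional_config. The model name below is a placeholder.
from vllm import LLM

llm = LLM(
    model="your-model",  # placeholder, not from this PR
    additional_config={
        "npugraph_ex_config": {
            # npugraph_ex is now on by default; set enable to false to opt out
            "enable": False,
        },
    },
)
```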
```diff
@@ -88,7 +88,7 @@ def npugraph_ex_compile(
     # that can trigger the compilation of static kernel. If this configuration is
     # not applied, new shapes will trigger the compilation of static kernels,
     # affecting program execution.
-    num_spec_tokens = vllm_config.speculative_config.num_speculative_token if vllm_config.speculative_config else 0
+    num_spec_tokens = vllm_config.speculative_config.num_speculative_tokens if vllm_config.speculative_config else 0
     uniform_decode_query_len = num_spec_tokens + 1
     max_num_tokens = vllm_config.scheduler_config.max_num_seqs * uniform_decode_query_len
     decode_cudagraph_batch_sizes = [
```
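To make the sizing arithmetic in this hunk concrete, here is a small worked example; the values (`num_spec_tokens = 2`, `max_num_seqs = 8`) are illustrative assumptions, not taken from any real config.

```python
# Worked example of the uniform-decode sizing above; all values assumed.
num_spec_tokens = 2  # would come from speculative_config.num_speculative_tokens
uniform_decode_query_len = num_spec_tokens + 1  # 3 tokens per sequence per decode step
max_num_seqs = 8  # would come from scheduler_config.max_num_seqs
max_num_tokens = max_num_seqs * uniform_decode_query_len  # 8 * 3 = 24

# Capturing graphs for token counts up to max_num_tokens ahead of time means a
# new decode shape at runtime does not trigger static-kernel compilation.
print(max_num_tokens)  # 24
```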
```diff
@@ -19,6 +19,7 @@
 from torch import fx as fx
 from vllm.config import VllmConfig
 
+from vllm_ascend.ascend_config import get_ascend_config
 from vllm_ascend.utils import vllm_version_is
 
 if vllm_version_is("0.15.0"):
```
```diff
@@ -55,18 +56,19 @@ class NpuGraphEXPassManager:
 
     def configure(self, config: VllmConfig):
         # By default, we enable the graph fusion and quantization fusion pass.
-        self.npugraph_ex_config: dict = config.additional_config.get("npugraph_ex_config", {})
-        if self.npugraph_ex_config.get("fuse_norm_quant", True):
+        self.npugraph_ex_config = get_ascend_config().npugraph_ex_config
+
+        if self.npugraph_ex_config.fuse_norm_quant:
             from .npugraph_ex_passes.graphex_norm_quant_fusion_pass import GraphEXAddRMSNormFusionPass
 
             self.passes.append(GraphEXAddRMSNormFusionPass(config))
 
-        if self.npugraph_ex_config.get("fuse_qknorm_rope", True):
+        if self.npugraph_ex_config.fuse_qknorm_rope:
             from .npugraph_ex_passes.graphex_qknorm_rope_fusion_pass import GraphEXQKNormRopeFusionPass
 
             self.passes.append(GraphEXQKNormRopeFusionPass(config))
 
-        if self.npugraph_ex_config.get("fuse_allreduce_rms", True):
+        if self.npugraph_ex_config.fuse_allreduce_rms:
             from .npugraph_ex_passes.graphex_allreduce_rmsnorm_fusion_pass import GraphEXMatmulAllReduceAddRMSNormPass
 
             self.passes.append(GraphEXMatmulAllReduceAddRMSNormPass(config))
```
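For intuition, here is a hypothetical sketch of the typed config object that `get_ascend_config().npugraph_ex_config` might return. The field names mirror those used in the diff, but this dataclass is illustrative only and is not the actual `vllm_ascend.ascend_config` implementation.

```python
# Hypothetical sketch of a typed npugraph_ex config; the real class lives in
# vllm_ascend.ascend_config. It illustrates why attribute access
# (cfg.fuse_norm_quant) replaces dict lookups with repeated inline defaults.
from dataclasses import dataclass

@dataclass
class NpuGraphEXConfig:
    enable: bool = True              # npugraph_ex is now on by default
    fuse_norm_quant: bool = True     # AddRMSNorm + quantization fusion pass
    fuse_qknorm_rope: bool = True    # QK-norm + RoPE fusion pass
    fuse_allreduce_rms: bool = True  # matmul-allreduce + AddRMSNorm fusion pass

# With defaults centralized on the config object, call sites no longer need to
# restate them via .get("fuse_norm_quant", True) at every branch.
```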