[Feature] Add docs for batch invariance and patch some extra operators (#6910)

### What this PR does / why we need it?

This PR adds docs for batch invariance and patches some extra operators
according to the validation results.
Please see https://github.com/vllm-project/vllm-ascend/issues/5487 to
track progress.
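
For context, a minimal usage sketch of enabling the mode from user code. This assumes `vllm_is_batch_invariant()` is driven by a `VLLM_BATCH_INVARIANT` environment variable that must be set before vLLM is imported; the model name is only illustrative:

```python
import os

# Assumption: batch-invariant mode is toggled through this environment variable,
# which is what vllm_is_batch_invariant() checks. Set it before importing vLLM so
# that AscendConfig and enable_batch_invariant_mode() pick it up.
os.environ["VLLM_BATCH_INVARIANT"] = "1"

from vllm import LLM, SamplingParams

llm = LLM(model="Qwen/Qwen2.5-7B-Instruct")  # illustrative model name
params = SamplingParams(temperature=0.0, max_tokens=16)
print(llm.generate(["Hello"], params)[0].outputs[0].text)
```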

### Does this PR introduce _any_ user-facing change?
No

### How was this patch tested?

- vLLM version: v0.16.0
- vLLM main: 15d76f74e2

---------

Signed-off-by: Ronald1995 <ronaldautomobile@163.com>
Author: Ronald
Date: 2026-03-05 09:12:40 +08:00
Committed by: GitHub
Parent: f8315f5717
Commit: 77e009d9fc
7 changed files with 276 additions and 19 deletions

View File

@@ -124,7 +124,14 @@ class AscendConfig:
# npu_fused_infer_attention_score performs better in all scenarios.
self.pa_shape_list = additional_config.get("pa_shape_list", [])
self.enable_async_exponential = bool(additional_config.get("enable_async_exponential", False))
# When enable_async_exponential is True, AscendSampler behaves differently from vLLM's Sampler,
# which breaks batch_invariant mode, so async exponential is disabled when
# batch_invariant mode is enabled.
from vllm.model_executor.layers.batch_invariant import vllm_is_batch_invariant
self.enable_async_exponential = (
bool(additional_config.get("enable_async_exponential", False)) and not vllm_is_batch_invariant()
)
self.enable_kv_nz = additional_config.get("enable_kv_nz", False)
if self.enable_kv_nz:

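To make the selection logic in this hunk explicit, a standalone sketch (the helper name is hypothetical and only mirrors the `AscendConfig.__init__` branch above):

```python
# Hypothetical helper mirroring the AscendConfig logic: the user's setting is
# honored only when batch-invariant mode is off.
def resolve_async_exponential(additional_config: dict, batch_invariant: bool) -> bool:
    return bool(additional_config.get("enable_async_exponential", False)) and not batch_invariant

assert resolve_async_exponential({"enable_async_exponential": True}, batch_invariant=True) is False
assert resolve_async_exponential({"enable_async_exponential": True}, batch_invariant=False) is True
assert resolve_async_exponential({}, batch_invariant=False) is False
```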
View File

@@ -24,6 +24,9 @@ from vllm.logger import init_logger
from vllm.model_executor.layers.batch_invariant import vllm_is_batch_invariant
from vllm.triton_utils import HAS_TRITON
# Keep a reference to the original torch.sum to avoid a recursive call inside reduce_sum.
torch_sum = torch.sum
logger = init_logger(__name__)
if HAS_TRITON:
@@ -34,6 +37,7 @@ if HAS_TRITON:
matmul_batch_invariant,
mm_batch_invariant,
)
from vllm_ascend.ops.triton.batch_invariant.softmax import softmax_batch_invariant
try:
@@ -44,10 +48,38 @@ except ImportError:
HAS_ASCENDC_BATCH_INVARIANT = False
def add_rms_norm(
x: torch.Tensor,
residual: torch.Tensor,
weight: torch.Tensor,
eps: float,
):
"""AclnnAddRmsNorm can't ensure batch invariant,
so we need to split it into add and rms_norm.
"""
x_ = x + residual
residual_ = x_
x_, _ = torch_npu.npu_rms_norm(x_, weight, eps)
return x_, None, residual_
def reduce_sum(x: torch.Tensor, dim: int | None = None, keepdim: bool = False) -> torch.Tensor:
"""npu_reduce_sum_batch_invariant requires dim to be specified, but torch.sum
doesn't require it, so we set dim to -1 by default if dim is None and x.dim()==1.
"""
dim = -1 if dim is None and x.dim() == 1 else dim
if x.device.type == "npu" and dim is not None:
return torch.ops.batch_invariant_ops.npu_reduce_sum_batch_invariant(x, dim, keepdim)
# CPU tensors can't use npu_reduce_sum_batch_invariant, so fall back to the original torch.sum.
return torch_sum(x, dim, keepdim)
def override_envs_for_invariance():
# Enabling NZ mode would feed NZ-format inputs to the Triton operators,
# resulting in accuracy anomalies.
os.environ["VLLM_ASCEND_ENABLE_NZ"] = "0"
# The fused matmul-allreduce operator can't guarantee batch invariance, so we disable it.
os.environ["VLLM_ASCEND_ENABLE_MATMUL_ALLREDUCE"] = "0"
# communication determinism settings
os.environ["HCCL_DETERMINISTIC"] = "strict"
@@ -65,6 +97,8 @@ def enable_batch_invariant_mode():
if HAS_TRITON:
_batch_invariant_LIB.impl("aten::addmm", addmm_batch_invariant, "NPU")
_batch_invariant_LIB.impl("aten::bmm", bmm_batch_invariant, "NPU")
_batch_invariant_LIB.impl("aten::softmax", softmax_batch_invariant, "NPU")
_batch_invariant_LIB.impl("aten::_softmax", softmax_batch_invariant, "NPU")
# Prefer the operators implemented in the AscendC batch-invariant ops.
if HAS_ASCENDC_BATCH_INVARIANT:
@@ -76,6 +110,10 @@ def enable_batch_invariant_mode():
torch_npu.npu_fused_infer_attention_score = (
torch.ops.batch_invariant_ops.npu_fused_infer_attention_score_batch_invariant
)
# Patch npu_add_rms_norm to ensure batch invariance.
torch_npu.npu_add_rms_norm = add_rms_norm
# torch.sum can't be replaced by dispatch logic, so we patch it directly.
torch.sum = reduce_sum
# Fall back to the Triton implementations if AscendC is not available.
elif HAS_TRITON:

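A self-contained sketch of why the module captures `torch_sum = torch.sum` before the patch is installed: once `torch.sum` is rebound to `reduce_sum`, the fallback path must call the saved original or it would recurse (simplified here with the NPU branch omitted):

```python
import torch

torch_sum = torch.sum  # saved before patching, as in the hunk above

def reduce_sum(x: torch.Tensor, dim=None, keepdim: bool = False) -> torch.Tensor:
    dim = -1 if dim is None and x.dim() == 1 else dim
    # Simplified: the NPU branch is omitted, so we always take the fallback.
    # Calling torch.sum here instead of torch_sum would recurse forever,
    # because torch.sum is rebound to this very function below.
    return torch_sum(x, dim, keepdim)

torch.sum = reduce_sum  # the patch installed by enable_batch_invariant_mode()
print(torch.sum(torch.ones(4)))  # tensor(4.), computed via the saved original
```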
View File

@@ -1,4 +1,5 @@
import torch
from vllm.model_executor.layers.batch_invariant import vllm_is_batch_invariant
from vllm.v1.sample.ops.topk_topp_sampler import TopKTopPSampler
from vllm.v1.sample.sampler import Sampler
@@ -73,6 +74,10 @@ class AscendTopKTopPSampler(TopKTopPSampler):
def forward_native(self, logits, generators, k, p):
"""Override pytorch native implementation to torch_npu"""
# When batch_invariant mode is enabled, use vLLM's native implementation;
# otherwise batch_invariant mode would not work.
if vllm_is_batch_invariant():
return super().forward_native(logits, generators, k, p)
logits = self.apply_top_k_top_p(logits, k, p)
logits_to_return = None
if self.logprobs_mode == "processed_logits":

View File

@@ -258,10 +258,19 @@ def enable_custom_op():
Enable lazy init for vllm_ascend_C to avoid early initialization of CANN's RTS component.
Ensure that ASCEND_RT_VISIBLE_DEVICES can be dynamically modified before torch.npu.set_device().
"""
from vllm.model_executor.layers.batch_invariant import vllm_is_batch_invariant
global _CUSTOM_OP_ENABLED
if _CUSTOM_OP_ENABLED is not None:
return _CUSTOM_OP_ENABLED
# Some custom operators in vllm-ascend don't have batch-invariant
# implementations, so we disable all of them here.
if vllm_is_batch_invariant():
_CUSTOM_OP_ENABLED = False
return _CUSTOM_OP_ENABLED
try:
# isort: off
# register custom ops into torch_library here