[Feature] support aclgraph for model runner v2 (#7110)

### What this PR does / why we need it?
This PR adds aclgraph support for model runner v2; see RFC
#5208 for details. The PR contains these modifications:
- adapt to the newest commit of the vLLM main branch.
- provide a unified interface for extra forward context shared by model
runner v1 and model runner v2 (see the sketch after this list).
- implement graph mode for the main model.
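
The diff below routes reads of `pad_size`, `flash_comm_v1_enabled`, and `mmrs_fusion` through a module-level `_EXTRA_CTX` object instead of fetching them from `get_forward_context()` at each call site. The PR excerpt only shows the call sites, so the following is a minimal sketch of what such a unified interface could look like; the class and method names (`_ExtraContextState`, `set`) are assumptions, and only the three field names come from the diff:

```python
# Hypothetical sketch of the vllm_ascend.ascend_forward_context internals.
# Only pad_size / flash_comm_v1_enabled / mmrs_fusion appear in this PR;
# the surrounding structure is illustrative.
from dataclasses import dataclass
from typing import Any, Optional


@dataclass
class _ExtraContextState:
    pad_size: int = 0
    flash_comm_v1_enabled: bool = False
    mmrs_fusion: bool = False


class _ExtraContext:
    """Module-level accessor shared by model runner v1 and v2.

    Reading a field asserts that a forward pass populated the state first,
    which would explain the try/except AssertionError at the call sites.
    """

    def __init__(self) -> None:
        self._state: Optional[_ExtraContextState] = None

    def set(self, state: Optional[_ExtraContextState]) -> None:
        # Called once per forward pass by the model runner (assumed).
        self._state = state

    def __getattr__(self, name: str) -> Any:
        # Only invoked for names not found via normal lookup, so reading
        # self._state here does not recurse.
        assert self._state is not None, "extra forward context is not set"
        return getattr(self._state, name)


_EXTRA_CTX = _ExtraContext()
```

With an interface like this, layer code reads `_EXTRA_CTX.pad_size` directly, presumably keeping the hot path free of per-call forward-context lookups for both model runners.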

### Does this PR introduce _any_ user-facing change?
no

### How was this patch tested?

- vLLM version: v0.16.0
- vLLM main: 4034c3d32e

---------

Signed-off-by: Ronald1995 <ronaldautomobile@163.com>
Author: Ronald
Date: 2026-03-13 09:11:46 +08:00
Committed by: GitHub
Parent: 1f71da80eb
Commit: c980e68d40
52 changed files with 840 additions and 309 deletions


```diff
@@ -57,9 +57,9 @@ from vllm.distributed import (
     tensor_model_parallel_reduce_scatter,
 )
 from vllm.distributed.parallel_state import get_tp_group
-from vllm.forward_context import get_forward_context
 from vllm_ascend.ascend_config import get_ascend_config
+from vllm_ascend.ascend_forward_context import _EXTRA_CTX
 from vllm_ascend.distributed.parallel_state import (
     get_flashcomm2_odp_group,
     get_flashcomm2_otp_group,
```
```diff
@@ -311,8 +311,7 @@ class Flashcomm2OProjRowParallelOp(CustomRowParallelOp):
         input_parallel = splitted_input[tp_rank].contiguous()
         # padding for all-to-all
-        forward_context = get_forward_context()
-        num_padding_tokens = forward_context.pad_size
+        num_padding_tokens = _EXTRA_CTX.pad_size
         if num_padding_tokens > 0:
             input_parallel = nn.functional.pad(input_parallel, (0, 0, 0, num_padding_tokens))
```
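
As an aside on the pad call above: `nn.functional.pad` consumes padding pairs starting from the last dimension, so `(0, 0, 0, n)` leaves the hidden dimension untouched and appends `n` zero token rows. A runnable illustration (the shapes are made up):

```python
import torch
import torch.nn.functional as F

tokens = torch.randn(6, 128)   # [num_tokens, hidden_size]
num_padding_tokens = 2         # pad so tokens split evenly for the all-to-all
padded = F.pad(tokens, (0, 0, 0, num_padding_tokens))
print(padded.shape)            # torch.Size([8, 128])
```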
```diff
@@ -368,7 +367,7 @@ class Flashcomm2OProjRowParallelOp(CustomRowParallelOp):
         else:
             output = output_parallel
-        if not forward_context.flash_comm_v1_enabled:
+        if not _EXTRA_CTX.flash_comm_v1_enabled:
             # flashcomm1 not enabled
             output = get_tp_group().all_gather(output, 0)
         if num_padding_tokens > 0:
```
```diff
@@ -514,9 +513,8 @@ class SequenceRowParallelOp(CustomRowParallelOp):
     def matmul_and_reduce(self, input_parallel: torch.Tensor, bias_: Parameter | None) -> torch.Tensor:
         assert self.quant_method is not None
         try:
-            forward_context = get_forward_context()
-            flash_comm_v1_enabled = forward_context.flash_comm_v1_enabled
-            mmrs_fusion = forward_context.mmrs_fusion
+            flash_comm_v1_enabled = _EXTRA_CTX.flash_comm_v1_enabled
+            mmrs_fusion = _EXTRA_CTX.mmrs_fusion
         except AssertionError:
             flash_comm_v1_enabled = False
             mmrs_fusion = False
```
```diff
@@ -527,7 +525,7 @@ class SequenceRowParallelOp(CustomRowParallelOp):
                 output_parallel = self.layer.quant_method.apply(self.layer, x, bias=bias_)
             return tensor_model_parallel_all_reduce(output_parallel)
-        pad_size = forward_context.pad_size
+        pad_size = _EXTRA_CTX.pad_size
         if pad_size > 0 and not (enable_dsa_cp() and "o_proj" in self.layer.prefix):
             x = F.pad(x, (0, 0, 0, pad_size))
```
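
Every hunk in this file follows the same mechanical substitution; condensed into a before/after sketch (illustrative, using only names from the diff):

```python
# Before: fetch vLLM's forward context on every call and read the flag.
forward_context = get_forward_context()
pad_size = forward_context.pad_size

# After: read the same flag from the module-level _EXTRA_CTX, which is
# populated for both model runner v1 and model runner v2.
pad_size = _EXTRA_CTX.pad_size
```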