[Feature] support eager mode in model runner v2 (#5210)

### What this PR does / why we need it?
#5051 only implemented a basic framework for model runner v2, and several bugs
still blocked end-to-end functionality; this PR aims to enable basic e2e
functionality in eager mode.
Model runner v2 plans:
https://github.com/vllm-project/vllm-ascend/issues/5208
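
As a rough usage sketch of what this change targets (the `VLLM_USE_V2_MODEL_RUNNER` flag is the one referenced in this diff; the model name and `enforce_eager` option below are illustrative assumptions, not part of this PR):

```python
# Hedged sketch: run model runner v2 in eager mode on Ascend.
import os

# Opt into model runner v2 (flag referenced by this PR's diff).
os.environ["VLLM_USE_V2_MODEL_RUNNER"] = "1"

from vllm import LLM, SamplingParams

# enforce_eager=True skips graph capture, i.e. the eager path this PR enables.
llm = LLM(model="Qwen/Qwen2.5-7B-Instruct", enforce_eager=True)
out = llm.generate(["Hello from Ascend"], SamplingParams(max_tokens=16))
print(out[0].outputs[0].text)
```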

- vLLM version: release/v0.13.0
- vLLM main: ad32e3e19c
---------
Signed-off-by: Ronald1995 <ronaldautomobile@163.com>
Author: Ronald <ronaldautomobile@163.com>
Date: 2025-12-29 15:28:34 +08:00
Committed by: GitHub
Parent: 4da46da9bf
Commit: e7e1a7dc05
19 changed files with 528 additions and 44 deletions

View File

@@ -21,6 +21,7 @@ from typing import ClassVar, List, Optional, Tuple, Type
 import torch
 import torch_npu
+import vllm.envs as envs_vllm
 from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
                                               AttentionLayer, AttentionType)
 from vllm.attention.backends.registry import (AttentionBackendEnum,
@@ -54,7 +55,10 @@ class AscendAttentionBackend(AttentionBackend):
     @staticmethod
     def get_name() -> str:
-        return "CUSTOM"
+        # HACK(Ronald1995): vLLM's `initialize_kv_cache` in model runner v2 asserts
+        # on the attention backend name; report FLASH_ATTN there to avoid the
+        # assertion error. Rectify this once vLLM drops the assertion.
+        return "CUSTOM" if not envs_vllm.VLLM_USE_V2_MODEL_RUNNER else "FLASH_ATTN"

     @staticmethod
     def get_impl_cls() -> Type["AscendAttentionBackendImpl"]:
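
The same FLASH_ATTN name workaround is repeated verbatim in the MLA and SFA backends further down. A minimal sketch of how the pattern could be factored into one shared helper (`reported_backend_name` is a hypothetical name; `envs_vllm.VLLM_USE_V2_MODEL_RUNNER` is the flag used in this diff):

```python
import vllm.envs as envs_vllm


def reported_backend_name(real_name: str) -> str:
    # Same HACK as above: model runner v2's initialize_kv_cache asserts on the
    # backend name, so pretend to be FLASH_ATTN while the flag is set and
    # return the real backend name otherwise.
    if envs_vllm.VLLM_USE_V2_MODEL_RUNNER:
        return "FLASH_ATTN"
    return real_name
```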
@@ -535,7 +539,10 @@ class AscendAttentionBackendImpl(AttentionImpl):
                       attn_metadata: AscendMetadata,
                       output: torch.Tensor):
         forward_context: ForwardContext = get_forward_context()
-        if forward_context.capturing:
+        # Model runner v2 uses its own ForwardContext subclass, which has no
+        # `capturing` attribute, so use getattr with a False default to avoid
+        # an AttributeError when model runner v2 is enabled.
+        if getattr(forward_context, "capturing", False):
             attn_output, num_tokens = self.full_graph_fia(
                 query, key, value, attn_metadata, output)
             output[:num_tokens] = attn_output[:num_tokens]
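
A self-contained sketch of the defensive check introduced above; both context classes here are stand-ins for illustration, not the real vLLM/vllm-ascend types:

```python
from dataclasses import dataclass


@dataclass
class _ContextV1:
    # Stand-in for a forward context that exposes `capturing`.
    capturing: bool = False


@dataclass
class _ContextV2:
    # Stand-in for model runner v2's context, which has no `capturing` field.
    pass


def is_capturing(ctx) -> bool:
    # getattr with a default avoids AttributeError on the v2-style context.
    return getattr(ctx, "capturing", False)


assert is_capturing(_ContextV1(capturing=True))
assert not is_capturing(_ContextV2())
```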

View File

@@ -5,6 +5,7 @@ from typing import (TYPE_CHECKING, ClassVar, NamedTuple, Optional, Tuple, Type,
 import numpy as np
 import torch
 import torch_npu
+import vllm.envs as envs_vllm
 from vllm.attention.backends.abstract import AttentionBackend, MLAAttentionImpl
 from vllm.attention.backends.utils import PAD_SLOT_ID
 from vllm.config import VllmConfig, get_current_vllm_config
@@ -53,7 +54,10 @@ class AscendMLABackend(AttentionBackend):
     @staticmethod
     def get_name() -> str:
-        return "ASCEND_MLA"
+        # HACK(Ronald1995): vLLM's `initialize_kv_cache` in model runner v2 asserts
+        # on the attention backend name; report FLASH_ATTN there to avoid the
+        # assertion error. Rectify this once vLLM drops the assertion.
+        return "ASCEND_MLA" if not envs_vllm.VLLM_USE_V2_MODEL_RUNNER else "FLASH_ATTN"

     @staticmethod
     def get_builder_cls():

View File

@@ -3,6 +3,7 @@ from typing import TYPE_CHECKING, ClassVar, Optional, Tuple, Type, TypeVar
 import torch
 import torch_npu
+import vllm.envs as envs_vllm
 from torch import nn
 from vllm.attention.backends.abstract import AttentionBackend, MLAAttentionImpl
 from vllm.config import CUDAGraphMode, VllmConfig, get_current_vllm_config
@@ -44,7 +45,10 @@ class AscendSFABackend(AttentionBackend):
     @staticmethod
     def get_name() -> str:
-        return "ASCEND_SFA"
+        # HACK(Ronald1995): vLLM's `initialize_kv_cache` in model runner v2 asserts
+        # on the attention backend name; report FLASH_ATTN there to avoid the
+        # assertion error. Rectify this once vLLM drops the assertion.
+        return "ASCEND_SFA" if not envs_vllm.VLLM_USE_V2_MODEL_RUNNER else "FLASH_ATTN"

     @staticmethod
     def get_builder_cls():