### What this PR does / why we need it?
Fix the bug "TypeError: 'NoneType' object is not iterable" in
vllm_ascend/compilation/acl_graph.py.
The root cause is that attn_metadata is None in the dummy_run of MTP.
- vLLM version: v0.12.0
- vLLM main:
ad32e3e19c
Signed-off-by: chenmenglong <chenmenglong1@huawei.com>
53 lines · 1.8 KiB · Python
import enum
|
|
from typing import Optional
|
|
|
|
import torch
|
|
from vllm.config import CUDAGraphMode, VllmConfig
|
|
from vllm.v1.core.sched.output import SchedulerOutput
|
|
from vllm.v1.sample.metadata import SamplingMetadata
|
|
from vllm.v1.spec_decode.metadata import SpecDecodeMetadata
|
|
|
|
|
|
class SpecDcodeType(enum.Enum):
    """Identifies which speculative-decoding algorithm a proposer implements.

    NOTE(review): the class name looks like a typo for ``SpecDecodeType``;
    kept as-is because renaming would break external callers.
    """

    NGRAM = 0
    EAGLE = 1
    EAGLE3 = 2
    # NOTE(review): value 3 is skipped — presumably reserved or removed;
    # confirm the gap is intentional before relying on contiguous values.
    MTP = 4
    SUFFIX = 5
|
|
|
|
|
|
class Proposer:
    """Abstract base interface for speculative-decoding proposers.

    Concrete subclasses (e.g. the MTP / EAGLE / n-gram proposers) implement
    the three hooks below; the model runner drives them at load time, during
    graph-capture dummy runs, and during real decoding steps.
    """

    def __init__(self,
                 vllm_config: VllmConfig,
                 device: Optional[torch.device] = None,
                 runner=None):
        """Base initializer; subclasses are expected to store the config,
        target device, and owning model runner as needed.

        Args:
            vllm_config: Global vLLM engine configuration.
            device: Device the proposer runs on; ``None`` lets the subclass
                pick a default.
            runner: The model runner that owns this proposer (untyped here
                to avoid an import cycle — presumably the GPU/NPU model
                runner; confirm against callers).
        """
        pass

    def load_model(self, model):
        """Called by load_model in model_runner.

        Subclasses load / attach the draft model weights here.
        """
        raise NotImplementedError

    @torch.inference_mode()
    def dummy_run(self,
                  num_tokens: int,
                  with_prefill: bool = False,
                  in_graph_capturing: bool = False,
                  num_reqs: int = 0,
                  num_tokens_across_dp: Optional[torch.Tensor] = None,
                  aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE,
                  batch_descriptor=None):
        """Called by dummy_run in model_runner.

        Runs a fake forward pass (e.g. for ACL graph capture or warm-up);
        no gradients, no real request state.

        Args:
            num_tokens: Number of tokens in the dummy batch.
            with_prefill: Whether the dummy batch simulates a prefill step.
            in_graph_capturing: True while the ACL/CUDA graph is being
                captured.
            num_reqs: Number of simulated requests in the batch.
            num_tokens_across_dp: Per-data-parallel-rank token counts, if
                data parallelism is active.
            aclgraph_runtime_mode: Graph-execution mode for this run.
            batch_descriptor: Opaque batch descriptor forwarded by the
                runner (untyped here).
        """
        raise NotImplementedError

    def generate_token_ids(self,
                           valid_sampled_token_ids: list[list[int]],
                           sampling_metadata: Optional[SamplingMetadata] = None,
                           scheduler_output: Optional[SchedulerOutput] = None,
                           spec_decode_metadata: Optional[SpecDecodeMetadata] = None,
                           positions: Optional[torch.Tensor] = None,
                           num_scheduled_tokens: int = 0,
                           hidden_states: Optional[torch.Tensor] = None,
                           aux_hidden_states: Optional[torch.Tensor] = None):
        """Called by execute_model in model_runner.

        Proposes draft token ids for the next verification step from the
        tokens just sampled by the target model.

        Args:
            valid_sampled_token_ids: Per-request lists of token ids accepted
                in the current step.
            sampling_metadata: Sampling parameters for the batch.
            scheduler_output: Scheduler state for the current step.
            spec_decode_metadata: Metadata from the previous speculative
                round, if any.
            positions: Position tensor for the scheduled tokens.
            num_scheduled_tokens: Total number of tokens scheduled this step.
            hidden_states: Target-model hidden states for the scheduled
                tokens.
            aux_hidden_states: Auxiliary hidden states (e.g. for EAGLE3-style
                multi-layer features) — presumably optional per algorithm;
                confirm against subclass usage.
        """
        raise NotImplementedError