adapt to main2main for model runner v2 (#7578)
### What this PR does / why we need it?
This PR adapts to the newest commit of the vLLM main branch for model runner v2. Please refer to
https://github.com/vllm-project/vllm-ascend/issues/5208
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
- vLLM version: v0.18.0
- vLLM main: ed359c497a
---------
Signed-off-by: Ronald1995 <ronaldautomobile@163.com>
```diff
@@ -20,6 +20,7 @@
 from typing import Any
 
 import torch
+from vllm.config.compilation import CUDAGraphMode
 from vllm.v1.kv_cache_interface import KVCacheConfig
 from vllm.v1.worker.gpu.model_states.default import DefaultModelState
 from vllm.v1.worker.utils import AttentionGroup
@@ -34,18 +35,28 @@ class AscendModelState(DefaultModelState):
     def prepare_attn(
         self,
         input_batch: AscendInputBatch,
+        cudagraph_mode: CUDAGraphMode,
         block_tables: tuple[torch.Tensor, ...],
         slot_mappings: torch.Tensor,
         attn_groups: list[list[AttentionGroup]],
         kv_cache_config: KVCacheConfig,
+        for_capture: bool = False,
     ) -> dict[str, Any]:
         """Override prepare_attn method because `build_attn_metadata` is different from vllm."""
+        if cudagraph_mode == CUDAGraphMode.FULL:
+            # Use padded sizes - padding is handled by model_runner.prepare_attn.
+            num_reqs = input_batch.num_reqs_after_padding
+            num_tokens = input_batch.num_tokens_after_padding
+        else:
+            # For piecewise cudagraphs and eager, use unpadded sizes.
+            num_reqs = input_batch.num_reqs
+            num_tokens = input_batch.num_tokens
         query_start_loc_cpu = torch.from_numpy(input_batch.query_start_loc_np)
         max_query_len = input_batch.num_scheduled_tokens.max().item()
         attn_metadata = build_attn_metadata(
             attn_groups=attn_groups,
-            num_reqs=input_batch.num_reqs,
-            num_tokens=input_batch.num_tokens,
+            num_reqs=num_reqs,
+            num_tokens=num_tokens,
             query_start_loc_gpu=input_batch.query_start_loc,
             query_start_loc_cpu=query_start_loc_cpu,
             max_query_len=max_query_len,
```
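For readers skimming the diff, the substantive change is the size-selection logic added to `prepare_attn`: full cudagraph capture/replay needs attention metadata built against the padded batch sizes (a captured graph has fixed shapes), while piecewise cudagraphs and eager execution use the exact sizes. Below is a minimal, runnable sketch of just that selection. The local `CUDAGraphMode` enum only mimics `vllm.config.compilation.CUDAGraphMode`, and `Batch`/`select_sizes` are hypothetical stand-ins that mirror the fields used in the diff; none of this is the actual vllm-ascend API.

```python
# Minimal sketch of the padded-vs-unpadded size selection from the diff.
# `Batch` and `select_sizes` are illustrative stand-ins, not vllm-ascend APIs;
# the local CUDAGraphMode only mimics vllm.config.compilation.CUDAGraphMode.
from dataclasses import dataclass
from enum import Enum


class CUDAGraphMode(Enum):
    NONE = 0
    PIECEWISE = 1
    FULL = 2


@dataclass
class Batch:
    num_reqs: int                  # requests actually scheduled this step
    num_tokens: int                # tokens actually scheduled this step
    num_reqs_after_padding: int    # padded to a captured graph's batch size
    num_tokens_after_padding: int  # padded to a captured graph's token count


def select_sizes(batch: Batch, mode: CUDAGraphMode) -> tuple[int, int]:
    if mode == CUDAGraphMode.FULL:
        # Full cudagraphs replay with fixed shapes, so the attention
        # metadata must be built for the padded sizes.
        return batch.num_reqs_after_padding, batch.num_tokens_after_padding
    # Piecewise cudagraphs and eager mode use the exact sizes.
    return batch.num_reqs, batch.num_tokens


batch = Batch(num_reqs=3, num_tokens=37,
              num_reqs_after_padding=4, num_tokens_after_padding=64)
assert select_sizes(batch, CUDAGraphMode.FULL) == (4, 64)
assert select_sizes(batch, CUDAGraphMode.PIECEWISE) == (3, 37)
```

Per the diff's own comment, the padding itself happens upstream in `model_runner.prepare_attn`; this override only has to pick which pair of sizes to hand to `build_attn_metadata`.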