[Feature] support eager mode in model runner v2 (#5210)

### What this PR does / why we need it?
#5051 only implemented a basic framework for model runner v2, but several
bugs still blocked e2e functionality; this PR aims to enable basic
end-to-end functionality.
Model runner v2 plan:
https://github.com/vllm-project/vllm-ascend/issues/5208

- vLLM version: release/v0.13.0
- vLLM main:
ad32e3e19c
---------
Signed-off-by: Ronald1995 <ronaldautomobile@163.com>
Ronald
2025-12-29 15:28:34 +08:00
committed by GitHub
parent 4da46da9bf
commit e7e1a7dc05
19 changed files with 528 additions and 44 deletions


@@ -1,5 +1,21 @@
# Adapt from https://github.com/vllm-project/vllm/blob/main/vllm/v1/worker/gpu/model_runner.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
import numpy as np
import torch
@@ -19,7 +35,8 @@ from vllm_ascend.worker.v2.attn_utils import (build_attn_metadata,
build_attn_state,
make_attention_mask)
from vllm_ascend.worker.v2.input_batch import AscendInputBuffers
from vllm_ascend.worker.v2.states import AscendRequestState
from vllm_ascend.worker.v2.sample.sampler import AscendSampler
from vllm_ascend.worker.v2.states import AscendRequestState, uva_wrapper
from vllm_ascend.worker.v2.utils import torch_cuda_wrapper
logger = init_logger(__name__)
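
The new imports above pull in `torch_cuda_wrapper` and `uva_wrapper`, which wrap `super().__init__()` in the next hunk so the CUDA-oriented `GPUModelRunner` can initialize on an NPU. As a rough sketch of the redirection idea only (hypothetical helper name, and the aliasing mechanism is an assumption, not the project's actual `torch_cuda_wrapper`):

```python
from contextlib import contextmanager

import torch
import torch_npu  # noqa: F401  # registers the torch.npu backend


@contextmanager
def cuda_to_npu_alias():
    """Temporarily make torch.cuda lookups resolve to the NPU backend."""
    original = torch.cuda
    torch.cuda = torch.npu
    try:
        yield
    finally:
        torch.cuda = original
```
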
@@ -29,7 +46,7 @@ class NPUModelRunner(GPUModelRunner):
"""Model runner for Ascend NPUs."""
def __init__(self, vllm_config: VllmConfig, device: torch.device):
with torch_cuda_wrapper():
with (torch_cuda_wrapper(), uva_wrapper()):
super().__init__(vllm_config, device)
# because we will override these attributes, delete these attributes to
@@ -37,6 +54,7 @@ class NPUModelRunner(GPUModelRunner):
del self.cudagraph_manager
del self.req_states
del self.input_buffers
del self.sampler
# NPU specific initializations can be added below.
self.cudagraph_manager: AclGraphManager = AclGraphManager(
@@ -65,6 +83,10 @@ class NPUModelRunner(GPUModelRunner):
device=self.device,
pin_memory=self.pin_memory,
)
# we need to adjust the Triton operators in the sampler,
# so reinitialize the sampler here.
self.sampler: AscendSampler = AscendSampler(
logprobs_mode=self.model_config.logprobs_mode, )
# actual seq lengths for query (used in attention backends).
self.actual_seq_lengths_q: list[int] = []
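
The parent's sampler is deleted and rebuilt as an `AscendSampler` because, as the comment notes, its Triton operators need adjusting for this backend. Purely as an illustration of swapping a Triton sampling kernel for portable PyTorch ops (a hypothetical helper, not `AscendSampler`'s actual code):

```python
import torch


def sample_without_triton(logits: torch.Tensor,
                          temperature: torch.Tensor) -> torch.Tensor:
    """Greedy when temperature == 0, otherwise softmax + multinomial."""
    greedy = logits.argmax(dim=-1)
    scaled = logits / temperature.clamp_min(1e-5).unsqueeze(-1)
    probs = torch.softmax(scaled, dim=-1)
    random = torch.multinomial(probs, num_samples=1).squeeze(-1)
    return torch.where(temperature == 0, greedy, random)
```
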
@@ -206,7 +228,9 @@ class NPUModelRunner(GPUModelRunner):
self.req_states.next_prefill_tokens,
idx_mapping_npu,
query_start_loc_gpu,
self.req_states.prefill_token_ids.gpu,
# use prefill_token_ids.copy_to_gpu() because NPU doesn't
# support UVA buffers.
self.req_states.prefill_token_ids.copy_to_gpu(),
self.req_states.prefill_len.gpu,
self.req_states.num_computed_tokens,
)
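
Reading `prefill_token_ids` through its `.gpu` UVA view works on CUDA but not on the NPU, so the hunk above switches to an explicit `copy_to_gpu()` call. A minimal sketch of that paired-buffer pattern (hypothetical class, not the runner's actual buffer type):

```python
import torch


class PairedBuffer:
    """A host tensor plus a device mirror that is synchronized on demand."""

    def __init__(self, shape, dtype, device):
        self.cpu = torch.zeros(shape, dtype=dtype)
        self.gpu = torch.zeros(shape, dtype=dtype, device=device)

    def copy_to_gpu(self) -> torch.Tensor:
        # Explicit host-to-device copy instead of reading a UVA-mapped
        # host buffer from the device.
        self.gpu.copy_(self.cpu)
        return self.gpu
```
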
@@ -255,7 +279,9 @@ class NPUModelRunner(GPUModelRunner):
num_computed_tokens_cpu=self.req_states.
num_computed_tokens_cpu[idx_mapping_cpu],
block_tables=block_tables,
slot_mappings=slot_mappings,
# torch_npu._reshape_and_cache operator requires slot_mappings to
# be torch.int32.
slot_mappings=slot_mappings.to(torch.int32),
kv_cache_config=self.kv_cache_config,
decode_token_per_req=self.decode_token_per_req,
attn_mask=attn_mask,
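
Casting `slot_mappings` here keeps the NPU cache-write path happy, since `torch_npu._reshape_and_cache` expects int32 indices per the comment above. A tiny sketch of the narrowing step, assuming the incoming mapping is int64:

```python
import torch

# Slot mappings built as int64 in this sketch.
slot_mappings = torch.tensor([0, 1, 2, 17, 18], dtype=torch.int64)

# Guard against silent truncation, then narrow to the dtype the NPU
# cache-write operator accepts.
assert int(slot_mappings.max()) <= torch.iinfo(torch.int32).max
slot_mappings_i32 = slot_mappings.to(torch.int32)
```
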
@@ -344,3 +370,8 @@ class NPUModelRunner(GPUModelRunner):
req_index]
self.input_buffers.seq_lens_cpu[
i] = num_computed_tokens + num_scheduled_tokens[req_id]
def eplb_warmup(self):
# TODO(Ronald1995): define this method only to avoid a call error in the
# worker; implement it properly in the future.
pass