[Feature] support eager mode in model runner v2 (#5210)
### What this PR does / why we need it?
#5051 only implemented a basic framework for model runner v2; several bugs
still blocked end-to-end (e2e) functionality. This PR aims to enable basic
eager-mode functionality on top of that framework.

Model runner v2 plan:
https://github.com/vllm-project/vllm-ascend/issues/5208
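
The core `states.py` fix (diff below) is *where* `UvaBuffer` gets stubbed out. Rebinding the name with `global UvaBuffer` only changes the binding in vllm-ascend's own module; the parent `RequestState.__init__` in `vllm.v1.worker.gpu.states` resolves `UvaBuffer` from *its* module globals, so the stub never took effect. Patching `vllm.v1.worker.gpu.states.UvaBuffer` directly fixes that. Below is a minimal, self-contained sketch of the "patch the name where it is looked up" rule; the module and helpers are stand-ins, not vllm's real API:

```python
import types

# Stand-in for vllm.v1.worker.gpu.states: a module whose function resolves
# UvaBuffer from its own globals, just as RequestState.__init__ does.
states = types.ModuleType("fake_states")
exec(
    "class UvaBuffer:\n"
    "    def __init__(self):\n"
    "        raise RuntimeError('UVA is not supported on this device')\n"
    "\n"
    "def make_buffer():\n"
    "    return UvaBuffer()\n",
    states.__dict__,
)

UvaBuffer = states.UvaBuffer  # our own module-level alias, like an import


class UvaBufferWrapper:
    """No-op stand-in: allocate nothing on devices without UVA."""

    def __init__(self, *args, **kwargs):
        pass


# Rebinding the alias in *this* module (what `global UvaBuffer` amounted to)
# changes nothing inside fake_states:
UvaBuffer = UvaBufferWrapper
try:
    states.make_buffer()
except RuntimeError:
    pass  # make_buffer still sees the original class

# Patching the attribute on the defining module is what actually works:
states.UvaBuffer = UvaBufferWrapper
assert isinstance(states.make_buffer(), UvaBufferWrapper)
```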
- vLLM version: release/v0.13.0
- vLLM main:
ad32e3e19c
---------
Signed-off-by: Ronald1995 <ronaldautomobile@163.com>
@@ -1,8 +1,28 @@
+# Adapt from https://github.com/vllm-project/vllm/blob/main/vllm/v1/worker/gpu/states.py
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# This file is a part of the vllm-ascend project.
+#
+
 from contextlib import contextmanager
 
 import torch
+import vllm
 from vllm.v1.utils import CpuGpuBuffer
-from vllm.v1.worker.gpu.states import RequestState, UvaBuffer
+from vllm.v1.worker.gpu.states import RequestState
 
 
 class AscendRequestState(RequestState):
@@ -18,16 +38,15 @@ class AscendRequestState(RequestState):
         device: torch.device,
         pin_memory: bool,
     ):
-        with uva_wrapper():
-            super().__init__(
-                max_num_reqs,
-                max_model_len,
-                max_num_batched_tokens,
-                num_speculative_steps,
-                vocab_size,
-                device,
-                pin_memory,
-            )
+        super().__init__(
+            max_num_reqs,
+            max_model_len,
+            max_num_batched_tokens,
+            num_speculative_steps,
+            vocab_size,
+            device,
+            pin_memory,
+        )
         # because we will override these attribute, delete these attribute to
         # make sure it's collected by python gc immediately.
         del self.prefill_token_ids
@@ -78,11 +97,9 @@ def uva_wrapper():
         def __init__(self, *args, **kwargs):
             pass
 
-    # TODO(Ronald1995): rectify this when NPU support uva.
-    global UvaBuffer
-    ori_class = UvaBuffer
     try:
-        UvaBuffer = UvaBufferWrapper
+        # TODO(Ronald1995): rectify this when NPU support uva.
+        vllm.v1.worker.gpu.states.UvaBuffer = UvaBufferWrapper
         yield
     finally:
-        UvaBuffer = ori_class
+        pass
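For reference, the same patch can also be applied through a context manager that restores the original attribute on exit, rather than being left in place as `uva_wrapper` above does. This is a generic sketch, not code from this PR; `patched_attr`, `module`, and `attr` are illustrative names:

```python
from contextlib import contextmanager


@contextmanager
def patched_attr(module, attr, replacement):
    """Temporarily replace `module.attr`, restoring it on exit."""
    original = getattr(module, attr)
    setattr(module, attr, replacement)
    try:
        yield
    finally:
        # Undo the patch even if the wrapped block raises.
        setattr(module, attr, original)
```

With this variant, `with patched_attr(vllm.v1.worker.gpu.states, "UvaBuffer", UvaBufferWrapper):` would confine the stub to the block that needs it; leaving the patch applied, as the diff above does, is a reasonable simplification while no NPU supports UVA (see the TODO in the code).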