[Feature] adapt to uva buffer and main2main (#6657)
### What this PR does / why we need it?
vLLM model runner v2 uses a UVA buffer to prepare input data, but the NPU
doesn't support UVA yet, so this PR implements a UvaBufferWrapper class to
mimic the GPU's UVA backend. In addition, this PR makes some modifications
to adapt to the newer main branch.
### Does this PR introduce _any_ user-facing change?
no
### How was this patch tested?
- vLLM main:
13397841ab
---------
Signed-off-by: Ronald1995 <ronaldautomobile@163.com>
This commit is contained in:
@@ -17,11 +17,7 @@
|
||||
# This file is a part of the vllm-ascend project.
|
||||
#
|
||||
|
||||
from contextlib import contextmanager
|
||||
|
||||
import torch
|
||||
import vllm
|
||||
from vllm.v1.utils import CpuGpuBuffer
|
||||
from vllm.v1.worker.gpu.states import RequestState
|
||||
|
||||
|
||||
@@ -36,7 +32,6 @@ class AscendRequestState(RequestState):
|
||||
num_speculative_steps: int,
|
||||
vocab_size: int,
|
||||
device: torch.device,
|
||||
pin_memory: bool,
|
||||
):
|
||||
super().__init__(
|
||||
max_num_reqs,
|
||||
@@ -45,11 +40,7 @@ class AscendRequestState(RequestState):
|
||||
num_speculative_steps,
|
||||
vocab_size,
|
||||
device,
|
||||
pin_memory,
|
||||
)
|
||||
# because we will override these attributes, delete these attributes to
|
||||
# make sure it's collected by python gc immediately.
|
||||
del self.prefill_token_ids
|
||||
# vllm gpu_model_runner_v2 deprecates the seqs_lens_cpu attribute,
|
||||
# because they think most attention backends do not need it.
|
||||
# However, the Ascend attention backend must use seqs_lens_cpu,
|
||||
@@ -60,11 +51,6 @@ class AscendRequestState(RequestState):
|
||||
dtype=torch.int32,
|
||||
device="cpu",
|
||||
)
|
||||
# NOTE(Ronald1995): Ascend NPUs do not support UVA yet,
|
||||
# so we use CpuGpuBuffer to allocate prefill_token_ids buffer.
|
||||
self.prefill_token_ids: CpuGpuBuffer = self._make_buffer( # type: ignore
|
||||
(self.max_num_reqs, self.max_model_len), dtype=torch.int32
|
||||
)
|
||||
|
||||
def add_request(
    self,
    req_id,
    prompt_len,
    prefill_token_ids,
    num_computed_tokens,
    sampling_params,
    lora_request,
):
    """Register a new request, then mirror its progress counter on the CPU.

    Delegates all bookkeeping to the base ``RequestState.add_request`` and
    afterwards records ``num_computed_tokens`` in the CPU-side
    ``num_computed_tokens_cpu`` tensor, which the Ascend attention backend
    reads instead of a device-side copy.
    """
    # The parent call must run first: it assigns the slot index that
    # req_id_to_index is consulted for below.
    super().add_request(
        req_id,
        prompt_len,
        prefill_token_ids,
        num_computed_tokens,
        sampling_params,
        lora_request,
    )
    slot = self.req_id_to_index[req_id]
    self.num_computed_tokens_cpu[slot] = num_computed_tokens
|
||||
|
||||
|
||||
@contextmanager
def uva_wrapper():
    """Temporarily replace vLLM's ``UvaBuffer`` with a no-op stand-in.

    Ascend NPUs do not support UVA (unified virtual addressing) yet, so any
    code constructed inside this context that instantiates
    ``vllm.v1.worker.gpu.states.UvaBuffer`` gets an inert placeholder
    instead.

    Yields:
        None. On exit the original ``UvaBuffer`` binding is restored.
    """

    class UvaBufferWrapper:
        # Accepts any constructor arguments and does nothing, mimicking the
        # interface point where a real UvaBuffer would be allocated.
        def __init__(self, *args, **kwargs):
            pass

    states_mod = vllm.v1.worker.gpu.states
    # Remember what was bound before patching so it can be put back. The
    # previous version left `finally: pass`, which leaked the patch past the
    # context's scope despite the try/finally scaffold signalling restore
    # intent.
    _missing = object()
    original = getattr(states_mod, "UvaBuffer", _missing)
    try:
        # TODO(Ronald1995): rectify this when NPU supports UVA.
        states_mod.UvaBuffer = UvaBufferWrapper
        yield
    finally:
        # Undo the monkey-patch so the context manager has no lasting
        # side effect on the vllm module.
        if original is _missing:
            del states_mod.UvaBuffer
        else:
            states_mod.UvaBuffer = original
|
||||
|
||||
Reference in New Issue
Block a user