adapt to main2main for model runner v2 (#7578)
### What this PR does / why we need it?
This PR aims to adapt to the newest commit of the vLLM main branch for model
runner v2. Please refer to
https://github.com/vllm-project/vllm-ascend/issues/5208
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
- vLLM version: v0.18.0
- vLLM main:
ed359c497a
---------
Signed-off-by: Ronald1995 <ronaldautomobile@163.com>
This commit is contained in:
@@ -79,14 +79,12 @@ class AscendInputBatch(InputBatch):
        num_reqs: int,
        num_tokens: int,
        input_buffers: AscendInputBuffers,
        device: torch.device,
    ) -> "AscendInputBatch":
        """Override the make_dummy method to calculate seq_lens_np."""
        input_batch = InputBatch.make_dummy(
            num_reqs,
            num_tokens,
            input_buffers,
            device,
        )
        # seq_len equals to query_len
        input_buffers.seq_lens_np[:num_reqs] = num_tokens // num_reqs
Reference in New Issue
Block a user