[bugfix] asyncscheduler bug fix (#4968)

### What this PR does / why we need it?
vllm-ascend now uses AsyncGPUModelRunnerOutput; the previously used AsyncNPUModelRunnerOutput is outdated, so this change fixes it.

- vLLM version: v0.12.0
- vLLM main:
ad32e3e19c

---------

Signed-off-by: zhenwenqi2024 <zhenwenqi_2022@qq.com>
This commit is contained in:
zhenwenqi2024
2025-12-13 17:04:54 +08:00
committed by GitHub
parent 3581946256
commit 4721e4f53f
3 changed files with 10 additions and 3 deletions

View File

@@ -93,6 +93,7 @@ jobs:
pytest -sv tests/e2e/singlecard/test_completion_with_prompt_embeds.py
pytest -sv tests/e2e/singlecard/test_aclgraph_accuracy.py
pytest -sv tests/e2e/singlecard/test_aclgraph_mem.py
pytest -sv tests/e2e/singlecard/test_async_scheduling.py
pytest -sv tests/e2e/singlecard/test_camem.py
pytest -sv tests/e2e/singlecard/test_guided_decoding.py
# torch 2.8 doesn't work with lora, fix me

View File

@@ -17,8 +17,12 @@ MTP_MODEL = "wemaster/deepseek_mtp_main_random_bf16"
first_prompt = ("The following numbers of the sequence " +
", ".join(str(i) for i in range(10)) + " are:")
example_prompts = [first_prompt, "In one word, the capital of France is "
] + [f"Tell me about the number {i}: " for i in range(32)]
example_prompts = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
]
default_params = dict(
temperature=0.0, # greedy

View File

@@ -1875,8 +1875,10 @@ class NPUModelRunner(GPUModelRunner):
return AsyncGPUModelRunnerOutput(
model_runner_output=model_runner_output,
sampled_token_ids=sampled_token_ids,
logprobs_tensors=sampler_output.logprobs_tensors,
invalid_req_indices=invalid_req_indices,
async_output_copy_stream=self.async_output_copy_stream,
vocab_size=self.input_batch.vocab_size,
)
def _build_dummy_attn_metadata(
@@ -3472,7 +3474,7 @@ def _torch_cuda_wrapper():
try:
# replace cuda APIs with xpu APIs, this should work by default
torch.cuda.Event = _EventPlaceholder
torch.cuda.Event = torch.npu.Event
torch.cuda.Stream = torch.npu.Stream
torch.cuda.default_stream = torch.npu.default_stream
torch.cuda.current_stream = torch.npu.current_stream