From 9a0b786f2bfaa358e237cee34dc700e1aec0ecdc Mon Sep 17 00:00:00 2001
From: pppeng <60355449+ppppeng@users.noreply.github.com>
Date: Mon, 27 Apr 2026 23:28:52 +0800
Subject: [PATCH] [bugfix][0.18.0] Fix race in non-blocking num_accepted_tokens
 (#8764)

### What this PR does / why we need it?

The same fix as https://github.com/vllm-project/vllm/pull/36013.

In `_update_states_after_model_execute`, `num_accepted_tokens` is copied from GPU to pinned CPU memory with `non_blocking=True`. The CPU-side numpy view is later read in `_build_attention_metadata` during the next `execute_model` call. With async scheduling, `_bookkeeping_sync` deliberately avoids any CUDA synchronization (that is the whole point of async scheduling), so there is no guarantee the DMA has landed before the CPU read. This patch synchronizes on the copy's recorded event right before the CPU-side read.

Signed-off-by: ppppeng
---
 vllm_ascend/worker/model_runner_v1.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/vllm_ascend/worker/model_runner_v1.py b/vllm_ascend/worker/model_runner_v1.py
index 6cbd26ce..5df036d1 100644
--- a/vllm_ascend/worker/model_runner_v1.py
+++ b/vllm_ascend/worker/model_runner_v1.py
@@ -2041,6 +2041,8 @@ class NPUModelRunner(GPUModelRunner):
         else:
             max_seq_len = self.seq_lens.np[:num_reqs].max().item()
         if use_spec_decode and self.need_accepted_tokens:
+            if self.num_accepted_tokens_event is not None:
+                self.num_accepted_tokens_event.synchronize()
             self.num_accepted_tokens.np[:num_reqs] = self.input_batch.num_accepted_tokens_cpu[:num_reqs]
             self.num_accepted_tokens.np[num_reqs:].fill(1)
             self.num_accepted_tokens.copy_to_gpu()
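
For reviewers unfamiliar with the pattern: below is a minimal, self-contained sketch of the producer/consumer event handshake the fix relies on. It is not the vLLM-Ascend code; it uses the standard `torch.cuda.Event` API (the Ascend backend exposes an analogous event type), and the tensor names and sizes are illustrative only.

```python
# Sketch of the event-synchronized non-blocking D2H copy pattern.
# Assumes a CUDA device is available; on Ascend the analogous NPU event is used.
import torch

device = "cuda"

# Device-side result produced by model execution (e.g. accepted token counts).
num_accepted_tokens_gpu = torch.randint(1, 4, (8,), device=device)

# Pinned CPU buffer whose numpy view is read when building the next step's metadata.
num_accepted_tokens_cpu = torch.empty(8, dtype=torch.int64, pin_memory=True)

# Producer side: launch the async copy, then record an event right after it so
# consumers can wait for just this transfer instead of a full device sync.
num_accepted_tokens_cpu.copy_(num_accepted_tokens_gpu, non_blocking=True)
copy_done = torch.cuda.Event()
copy_done.record()

# ... async scheduling continues; no blocking synchronization happens here ...

# Consumer side (next execute_model): before touching the CPU/numpy view,
# make sure the DMA has actually landed.
copy_done.synchronize()
accepted_np = num_accepted_tokens_cpu.numpy()
print(accepted_np)
```

The design point is that `event.synchronize()` waits only for work recorded up to the event, so the consumer blocks for the single pending copy rather than the whole stream, which preserves the benefit of async scheduling.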