From e5b938c5fe614df92c0f39bb4638c5074d41c5fa Mon Sep 17 00:00:00 2001
From: liziyu <56102866+liziyu179@users.noreply.github.com>
Date: Wed, 29 Oct 2025 23:14:00 +0800
Subject: [PATCH] [v0.11.0] [P/D] force with_prefill true after allreduce in kv
 producer (#3835)

### What this PR does / why we need it?
force with_prefill true after allreduce in kv producer.

This is a backport of #3768 and #3849

---------

Signed-off-by: liziyu
---
 vllm_ascend/distributed/mooncake_layerwise_connector.py | 3 ++-
 vllm_ascend/worker/model_runner_v1.py                   | 8 ++++----
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/vllm_ascend/distributed/mooncake_layerwise_connector.py b/vllm_ascend/distributed/mooncake_layerwise_connector.py
index 457c737..c3fb6e1 100644
--- a/vllm_ascend/distributed/mooncake_layerwise_connector.py
+++ b/vllm_ascend/distributed/mooncake_layerwise_connector.py
@@ -1145,7 +1145,8 @@ class MooncakeLayerwiseConnectorWorker:
                        connector_metadata: MooncakeLayerwiseConnectorMetadata,
                        **kwargs) -> None:
         """MooncakeLayerwiseConnector does not save explicitly."""
-        if self.kv_role == 'kv_producer':
+        if self.kv_role == 'kv_producer' and connector_metadata.requests.keys(
+        ):
             if self.pd_head_ratio != 1:
                 if self.current_layer != 0:
                     self.completion_event.wait()
diff --git a/vllm_ascend/worker/model_runner_v1.py b/vllm_ascend/worker/model_runner_v1.py
index 7b5a515..0a4fd85 100644
--- a/vllm_ascend/worker/model_runner_v1.py
+++ b/vllm_ascend/worker/model_runner_v1.py
@@ -2364,6 +2364,10 @@ class NPUModelRunner(LoRAModelRunnerMixin):
             tp_size = self.vllm_config.parallel_config.tensor_parallel_size
             num_tokens = math.ceil(num_tokens / tp_size) * tp_size
 
+        # Force dummy run on prefill stage when this node is deemed as kv producer.
+        if self.is_kv_producer and not self.is_kv_consumer:
+            with_prefill = True
+
         # Padding for DP
         (num_tokens, num_tokens_across_dp, with_prefill,
          _) = self._sync_metadata_across_dp(num_tokens, with_prefill, False)
@@ -2411,10 +2415,6 @@ class NPUModelRunner(LoRAModelRunnerMixin):
         num_scheduled_tokens = np.array(num_scheduled_tokens_list,
                                         dtype=np.int32)
 
-        # Force dummy run on prefill stage when this node is deemed as kv producer.
-        if self.is_kv_producer and not self.is_kv_consumer:
-            with_prefill = True
-
         if not self.in_profile_run and self.dynamic_eplb:
             self.eplb_updator.forward_before()
 