[bugfix](CP) Fix and unify the PD request discrimination logic. (#5939)

### What this PR does / why we need it?
Since the PR (https://github.com/vllm-project/vllm/pull/32118) changed the criteria vLLM uses to distinguish Prefill from Decode requests, PCPManager needs to follow the same standard. Because PCPManager previously recomputed the PD request counts in several places, this PR consolidates that logic and updates the PD request counts once per batch.
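
As a rough illustration of the consolidation (a sketch only, not the exact vllm-ascend implementation): the manager can classify the whole batch once in `init_batch_info` and cache the counts, and later call sites read the cached values instead of re-deriving them. The `decode_threshold` comparison below is an assumption that mirrors the per-call logic this diff removes; the real classification criterion follows the upstream PR.

```python
import numpy as np


class PCPManagerSketch:
    """Minimal sketch; the real PCPManager in vllm-ascend carries far more state."""

    def __init__(self, decode_threshold: int = 1):
        # Hypothetical knob mirroring the `self.decode_threshold` comparison
        # that the old per-call code performed.
        self.decode_threshold = decode_threshold
        self.num_prefill_reqs = 0
        self.num_decode_reqs = 0

    def init_batch_info(self, num_scheduled_tokens: np.ndarray, num_reqs: int) -> None:
        # Classify every request once per batch so later helpers
        # (get_logits_indices, update_tokens_for_pcp, ...) can reuse the
        # cached counts instead of recomputing them.
        query_lens = num_scheduled_tokens[:num_reqs]
        self.num_prefill_reqs = int((query_lens > self.decode_threshold).sum())
        self.num_decode_reqs = num_reqs - self.num_prefill_reqs
```

Under this scheme, call sites such as the attention-metadata path simply read `pcp_manager.num_prefill_reqs` / `pcp_manager.num_decode_reqs` rather than recomputing them from `query_start_loc`.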

### How was this patch tested?
```bash
pytest tests/e2e/multicard/4-cards/long_sequence/test_mtp.py
```

- vLLM version: v0.13.0
- vLLM main: 11b6af5280

Signed-off-by: QiuChunshuo <qiuchunshuo@huawei.com>
Authored by Qiu on 2026-01-31 10:26:02 +08:00; committed by GitHub
parent 4230bc8646
commit 638cae824d
4 changed files with 88 additions and 111 deletions


@@ -600,10 +600,16 @@ class NPUModelRunner(GPUModelRunner):
req_indices, positions_np)
self.input_batch.block_table.commit_slot_mapping(
total_num_scheduled_tokens)
if self.pcp_size * self.dcp_size > 1:
self.pcp_manager.init_batch_info(
num_scheduled_tokens,
self.input_batch.num_reqs,
)
# for pcp, prefill mtp should use the original scheduler_output
if self.speculative_config and self.pcp_size * self.dcp_size > 1:
self.pcp_manager.generate_pcp_mtp_input(
num_reqs,
total_num_scheduled_tokens,
scheduler_output.num_scheduled_tokens,
with_prefill,
@@ -621,8 +627,6 @@ class NPUModelRunner(GPUModelRunner):
num_reqs], position_pcp = self.pcp_manager.update_tokens_for_pcp(
num_scheduled_tokens[:num_reqs],
self.arange_np,
self.input_batch.num_reqs,
self.reorder_batch_threshold,
)
# Re-update after PCP split sequences.
total_num_scheduled_tokens = sum(num_scheduled_tokens)
@@ -772,8 +776,7 @@ class NPUModelRunner(GPUModelRunner):
num_draft_tokens = None
num_sampled_tokens = np.ones(num_reqs, dtype=np.int32)
if self.pcp_size * self.dcp_size > 1:
logits_indices = self.pcp_manager.get_logits_indices(
cu_num_tokens, num_reqs)
logits_indices = self.pcp_manager.get_logits_indices(cu_num_tokens)
logits_indices = logits_indices.pin_memory().to(
self.device, non_blocking=True)
else:
@@ -1020,9 +1023,8 @@ class NPUModelRunner(GPUModelRunner):
num_reqs = self.input_batch.num_reqs
ori_query_lens = query_start_loc_pcp_full_cpu[1:num_reqs+1] - \
query_start_loc_pcp_full_cpu[:num_reqs]
num_prefill_reqs = (ori_query_lens
> self.decode_threshold).sum().item()
num_decode_reqs = num_reqs - num_prefill_reqs
num_prefill_reqs = self.pcp_manager.num_prefill_reqs
num_decode_reqs = self.pcp_manager.num_decode_reqs
else:
long_seq_metadata = None # type: ignore
num_prefill_reqs = 0
@@ -1976,7 +1978,7 @@ class NPUModelRunner(GPUModelRunner):
)
return blk_table_tensor, slot_mapping
long_seq_metdadata = _get_pcp_metadata(num_tokens)
self.long_seq_metadata = _get_pcp_metadata(num_tokens)
block_table_gid_0, slot_mapping_gid_0 = _get_block_table_and_slot_mapping(0)
actual_last_loc = self.query_start_loc.np[num_reqs_padded]
@@ -2008,7 +2010,7 @@ class NPUModelRunner(GPUModelRunner):
positions=self.positions.gpu,
attn_state=self.attn_state,
decode_token_per_req=self.decode_token_per_req,
prefill_context_parallel_metadata=long_seq_metdadata,
prefill_context_parallel_metadata=self.long_seq_metadata,
)
if logits_indices is not None and self.cache_config.kv_sharing_fast_prefill:
@@ -2198,6 +2200,11 @@ class NPUModelRunner(GPUModelRunner):
force_has_lora=activate_lora,
)
)
if self.pcp_size * self.dcp_size > 1:
self.pcp_manager.init_batch_info(
num_scheduled_tokens,
num_reqs,
)
if cudagraph_runtime_mode is None:
cudagraph_runtime_mode = _cudagraph_mode
else: