[Lint] Style: Convert vllm-ascend/ to ruff format (Batch #3) (#5978)

### What this PR does / why we need it?
**Scope of Changes**: this batch applies `ruff format` (plus the accompanying lint-driven rewrites, such as `Optional[int]` → `int | None`) to the files below; a sketch of the main rewrite pattern follows the table.
| File Path |
| :--- |
| `vllm_ascend/attention/mla_v1.py` |
| `vllm_ascend/attention/sfa_v1.py` |
| `vllm_ascend/core/recompute_scheduler.py` |
| `vllm_ascend/core/scheduler_dynamic_batch.py` |
| `vllm_ascend/distributed/device_communicators/npu_communicator.py` |
| `vllm_ascend/distributed/device_communicators/pyhccl.py` |
| `vllm_ascend/distributed/device_communicators/pyhccl_wrapper.py` |
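
Aside from quote style and line wrapping, the one rewrite in this batch that touches annotations is the `typing.Optional` modernization: `from typing import Optional` is dropped and `Optional[int]` becomes `int | None`, which is behavior-preserving on Python 3.10+ (PEP 604). A minimal illustrative sketch (not code from this PR):

```python
from typing import Optional

# PEP 604 (Python >= 3.10): ruff rewrites Optional[X] as X | None and
# removes the then-unused typing import.
OldStyle = Optional[int]
NewStyle = int | None

# The two spellings compare equal at runtime, so the annotation change
# is purely cosmetic.
assert OldStyle == NewStyle
```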

### Does this PR introduce _any_ user-facing change?

No. The changes are formatting and style only; runtime behavior is unchanged.
### How was this patch tested?

- vLLM version: v0.13.0
- vLLM main: 2c24bc6996
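
A format-only batch can additionally be sanity-checked by confirming that each touched file parses to the same AST before and after the rewrite (the `Optional` → `int | None` annotation change is the one rewrite that does alter the tree). A minimal sketch of such a check; the helper name and paths are illustrative, not from this PR:

```python
import ast

def same_ast(before_src: str, after_src: str) -> bool:
    """True when two sources differ only in formatting (quotes, wrapping, commas)."""
    return ast.dump(ast.parse(before_src)) == ast.dump(ast.parse(after_src))

# Illustrative usage against checked-out copies of a touched file:
# before = open("before/scheduler_dynamic_batch.py").read()
# after = open("after/scheduler_dynamic_batch.py").read()
# assert same_ast(before, after)
```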

---------

Signed-off-by: MrZ20 <2609716663@qq.com>
Co-authored-by: Soren <user@SorendeMac-mini.local>
Commit: 7faa6878a6 (parent: 4e53c1d900)
Author: SILONG ZENG
Date: 2026-01-24 22:10:18 +08:00 (committed by GitHub)
9 changed files with 953 additions and 1148 deletions

#### `vllm_ascend/core/scheduler_dynamic_batch.py`

@@ -16,7 +16,6 @@
 #
 import os
 import time
-from typing import Optional
 
 import pandas as pd
 from vllm.config import VllmConfig
@@ -25,8 +24,7 @@ from vllm.logger import logger
 from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry
 from vllm.v1.core.kv_cache_manager import KVCacheBlocks
 from vllm.v1.core.sched.output import NewRequestData, SchedulerOutput
-from vllm.v1.core.sched.request_queue import (SchedulingPolicy,
-                                              create_request_queue)
+from vllm.v1.core.sched.request_queue import SchedulingPolicy, create_request_queue
 from vllm.v1.core.sched.scheduler import Scheduler
 from vllm.v1.engine import EngineCoreEventType
 from vllm.v1.kv_cache_interface import KVCacheConfig
@@ -43,8 +41,9 @@ class BudgetRefiner:
         if not self.enabled:
             return
         logger.info(
-            "Dynamic batch is enabled with SLO limit: {}, and chunked prefill is forced to be activated because dynamic batch relies on it"
-            .format(str(slo_limit)))
+            "Dynamic batch is enabled with SLO limit: {}, and chunked prefill is "
+            "forced to be activated because dynamic batch relies on it".format(str(slo_limit))
+        )
         self.lookup: dict[tuple[int, int], int] = {}
         self.context_keys: set[int] = set()
         self.dnum_keys: set[int] = set()
@@ -61,19 +60,20 @@
                 "The dynamic batching feature requires the lookup table "
                 "'profile_table.csv', but it was not found at '%s'. "
                 "Please download the corresponding table file.",
-                table_file_path)
+                table_file_path,
+            )
             self.enabled = False
             return
         else:
             df = pd.read_csv(table_file_path)
-            grouped = df.groupby(['ctx_len', 'd_num'])
+            grouped = df.groupby(["ctx_len", "d_num"])
             for (ctx_len, d_num), group in grouped:
-                valid = group[group['cost'] <= slo_limit]
+                valid = group[group["cost"] <= slo_limit]
                 if not valid.empty:
-                    max_row = valid.loc[valid['chunk_size'].idxmax()]
+                    max_row = valid.loc[valid["chunk_size"].idxmax()]
                     assert isinstance(ctx_len, int), "ctx_len must be an integer"
                     assert isinstance(d_num, int), "d_num must be an integer"
-                    self.lookup[(ctx_len, d_num)] = int(max_row['chunk_size'])
+                    self.lookup[(ctx_len, d_num)] = int(max_row["chunk_size"])
                     self.context_keys.add(ctx_len)
                     self.dnum_keys.add(d_num)
         self.context_keys = set(sorted(self.context_keys))
@@ -97,7 +97,10 @@
             logger.warn(f"Table miss for ctx,dnum{aligned_ctx, aligned_dnum}")
             budget = self.default_budget
         # For debug.
-        # logger.info(f"budget {budget}, ctx,dnum {aligned_ctx, aligned_dnum}, raw ctx,dnum {num_deocde_tokens, num_decode}")
+        # logger.info(
+        #     f"budget {budget}, ctx,dnum {aligned_ctx, aligned_dnum}, "
+        #     f"raw ctx,dnum {num_deocde_tokens, num_decode}"
+        # )
         return budget
 
     def refine_budget(self, running_request, budget):
@@ -106,9 +109,8 @@
             return budget
         # assume all running request will be scheduled.
         num_decode_token_lst = [
-            req.num_tokens_with_spec \
-            for req in running_request \
-            if req.num_computed_tokens >= req.num_prompt_tokens ]
+            req.num_tokens_with_spec for req in running_request if req.num_computed_tokens >= req.num_prompt_tokens
+        ]
         num_decode = len(num_decode_token_lst)
         if num_decode <= 0:
             return budget
@@ -125,18 +127,25 @@ class SchedulerDynamicBatch(Scheduler):
         vllm_config: VllmConfig,
         kv_cache_config: KVCacheConfig,
         structured_output_manager: StructuredOutputManager,
-        block_size: Optional[int] = None,
+        block_size: int | None = None,
         mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY,
         include_finished_set: bool = False,
         log_stats: bool = False,
     ) -> None:
-        super().__init__(vllm_config, kv_cache_config,
-                         structured_output_manager, block_size, mm_registry,
-                         include_finished_set, log_stats)
+        super().__init__(
+            vllm_config,
+            kv_cache_config,
+            structured_output_manager,
+            block_size,
+            mm_registry,
+            include_finished_set,
+            log_stats,
+        )
         self.running: list[Request] = []
         self.budget_refiner = BudgetRefiner(
             default_budget=self.scheduler_config.max_num_batched_tokens,
-            slo_limit=self.scheduler_config.SLO_limits_for_dynamic_batch)
+            slo_limit=self.scheduler_config.SLO_limits_for_dynamic_batch,
+        )
 
     def schedule(self) -> SchedulerOutput:
         # NOTE: This scheduling algorithm is developed based on the "super.schedule()"
@@ -159,20 +168,13 @@
         req_to_new_blocks: dict[str, KVCacheBlocks] = {}
         num_scheduled_tokens: dict[str, int] = {}
         token_budget = self.max_num_scheduled_tokens
-        token_budget = self.budget_refiner.refine_budget(
-            self.running, token_budget)
+        token_budget = self.budget_refiner.refine_budget(self.running, token_budget)
 
         # NOTE: We move the prefill requests to the end of the self.running
         # list and keep the relative order unchanged. This rearrangement makes this
         # scheduling algorithm a strict decode-first chunked prefills.
-        d_lst = [
-            req for req in self.running
-            if req.num_computed_tokens >= req.num_prompt_tokens
-        ]
-        p_lst = [
-            req for req in self.running
-            if req.num_computed_tokens < req.num_prompt_tokens
-        ]
+        d_lst = [req for req in self.running if req.num_computed_tokens >= req.num_prompt_tokens]
+        p_lst = [req for req in self.running if req.num_computed_tokens < req.num_prompt_tokens]
         self.running = d_lst + p_lst
 
         # Encoder-related.
@@ -189,30 +191,26 @@
         while req_index < len(self.running) and token_budget > 0:
             request = self.running[req_index]
 
-            num_new_tokens = (request.num_tokens_with_spec +
-                              request.num_output_placeholders -
-                              request.num_computed_tokens)
-            if (0 < self.scheduler_config.long_prefill_token_threshold <
-                    num_new_tokens):
-                num_new_tokens = (
-                    self.scheduler_config.long_prefill_token_threshold)
+            num_new_tokens = (
+                request.num_tokens_with_spec + request.num_output_placeholders - request.num_computed_tokens
+            )
+            if 0 < self.scheduler_config.long_prefill_token_threshold < num_new_tokens:
+                num_new_tokens = self.scheduler_config.long_prefill_token_threshold
             num_new_tokens = min(num_new_tokens, token_budget)
 
             # Make sure the input position does not exceed the max model len.
             # This is necessary when using spec decoding.
-            num_new_tokens = min(
-                num_new_tokens,
-                self.max_model_len - 1 - request.num_computed_tokens)
+            num_new_tokens = min(num_new_tokens, self.max_model_len - 1 - request.num_computed_tokens)
 
             # Schedule encoder inputs.
             encoder_inputs_to_schedule = None
             new_encoder_compute_budget = encoder_compute_budget
             if request.has_encoder_inputs:
-                (encoder_inputs_to_schedule, num_new_tokens,
-                 new_encoder_compute_budget
-                 ) = self._try_schedule_encoder_inputs(
-                     request, request.num_computed_tokens, num_new_tokens,
-                     encoder_compute_budget)
+                (encoder_inputs_to_schedule, num_new_tokens, new_encoder_compute_budget) = (
+                    self._try_schedule_encoder_inputs(
+                        request, request.num_computed_tokens, num_new_tokens, encoder_compute_budget
+                    )
+                )
 
             if num_new_tokens == 0:
                 # The request cannot be scheduled because one of the following
@@ -231,9 +229,8 @@
 
             while True:
                 new_blocks = self.kv_cache_manager.allocate_slots(
-                    request,
-                    num_new_tokens,
-                    num_lookahead_tokens=self.num_lookahead_tokens)
+                    request, num_new_tokens, num_lookahead_tokens=self.num_lookahead_tokens
+                )
                 if new_blocks is None:
                     # The request cannot be scheduled.
                     # Preempt the lowest-priority request.
@@ -253,8 +250,7 @@
                     preempted_req.status = RequestStatus.PREEMPTED
                     preempted_req.num_computed_tokens = 0
                     if self.log_stats:
-                        preempted_req.record_event(
-                            EngineCoreEventType.PREEMPTED, scheduled_timestamp)
+                        preempted_req.record_event(EngineCoreEventType.PREEMPTED, scheduled_timestamp)
 
                     self.waiting.prepend_request(preempted_req)
                     preempted_reqs.append(preempted_req)
@@ -279,19 +275,15 @@
 
             # Speculative decode related.
             if request.spec_token_ids:
-                num_scheduled_spec_tokens = (num_new_tokens +
-                                             request.num_computed_tokens -
-                                             request.num_tokens)
+                num_scheduled_spec_tokens = num_new_tokens + request.num_computed_tokens - request.num_tokens
                 if num_scheduled_spec_tokens > 0:
                     # Trim spec_token_ids list to num_scheduled_spec_tokens.
                     del request.spec_token_ids[num_scheduled_spec_tokens:]
-                    scheduled_spec_decode_tokens[request.request_id] = (
-                        request.spec_token_ids)
+                    scheduled_spec_decode_tokens[request.request_id] = request.spec_token_ids
 
             # Encoder-related.
             if encoder_inputs_to_schedule:
-                scheduled_encoder_inputs[request.request_id] = (
-                    encoder_inputs_to_schedule)
+                scheduled_encoder_inputs[request.request_id] = encoder_inputs_to_schedule
                 # Allocate the encoder cache.
                 for i in encoder_inputs_to_schedule:
                     self.encoder_cache_manager.allocate(request, i)
@@ -301,8 +293,10 @@
         scheduled_loras: set[int] = set()
         if self.lora_config:
             scheduled_loras = set(
-                req.lora_request.lora_int_id for req in scheduled_running_reqs
-                if req.lora_request and req.lora_request.lora_int_id > 0)
+                req.lora_request.lora_int_id
+                for req in scheduled_running_reqs
+                if req.lora_request and req.lora_request.lora_int_id > 0
+            )
             assert len(scheduled_loras) <= self.lora_config.max_loras
 
         # Use a temporary RequestQueue to collect requests that need to be
@@ -323,9 +317,7 @@
                     if is_ready:
                         request.status = RequestStatus.WAITING
                     else:
-                        logger.debug(
-                            "%s is still in WAITING_FOR_REMOTE_KVS state.",
-                            request.request_id)
+                        logger.debug("%s is still in WAITING_FOR_REMOTE_KVS state.", request.request_id)
                         self.waiting.pop_request()
                         skipped_waiting_requests.prepend_request(request)
                         continue
@@ -343,9 +335,14 @@
 
                 # Check that adding the request still respects the max_loras
                 # constraint.
-                if (self.lora_config and request.lora_request and
-                    (len(scheduled_loras) == self.lora_config.max_loras and
-                     request.lora_request.lora_int_id not in scheduled_loras)):
+                if (
+                    self.lora_config
+                    and request.lora_request
+                    and (
+                        len(scheduled_loras) == self.lora_config.max_loras
+                        and request.lora_request.lora_int_id not in scheduled_loras
+                    )
+                ):
                     # Scheduling would exceed max_loras, skip.
                     self.waiting.pop_request()
                     skipped_waiting_requests.prepend_request(request)
@@ -357,15 +354,15 @@
                 # Get already-cached tokens.
                 if request.num_computed_tokens == 0:
                     # Get locally-cached tokens.
-                    new_computed_blocks, num_new_local_computed_tokens = \
-                        self.kv_cache_manager.get_computed_blocks(
-                            request)
+                    new_computed_blocks, num_new_local_computed_tokens = self.kv_cache_manager.get_computed_blocks(
+                        request
+                    )
 
                     # Get externally-cached tokens if using a KVConnector.
                     if self.connector is not None:
-                        num_external_computed_tokens, load_kv_async = (
-                            self.connector.get_num_new_matched_tokens(
-                                request, num_new_local_computed_tokens))
+                        num_external_computed_tokens, load_kv_async = self.connector.get_num_new_matched_tokens(
+                            request, num_new_local_computed_tokens
+                        )
 
                         if num_external_computed_tokens is None:
                             # The request cannot be scheduled because
@@ -376,13 +373,11 @@
                             continue
 
                     # Total computed tokens (local + external).
-                    num_computed_tokens = (num_new_local_computed_tokens +
-                                           num_external_computed_tokens)
+                    num_computed_tokens = num_new_local_computed_tokens + num_external_computed_tokens
 
                 # KVTransfer: WAITING reqs have num_computed_tokens > 0
                 # after async KV recvs are completed.
                 else:
-                    new_computed_blocks = (
-                        self.kv_cache_manager.create_empty_block_list())
+                    new_computed_blocks = self.kv_cache_manager.create_empty_block_list()
                     num_new_local_computed_tokens = 0
                     num_computed_tokens = request.num_computed_tokens
@@ -399,15 +394,12 @@
                 # `request.num_prompt_tokens` to consider the resumed
                 # requests, which have output tokens.
                 num_new_tokens = request.num_tokens - num_computed_tokens
-                if (0 < self.scheduler_config.long_prefill_token_threshold
-                        < num_new_tokens):
-                    num_new_tokens = (
-                        self.scheduler_config.long_prefill_token_threshold)
+                if 0 < self.scheduler_config.long_prefill_token_threshold < num_new_tokens:
+                    num_new_tokens = self.scheduler_config.long_prefill_token_threshold
 
                 # chunked prefill has to be enabled explicitly to allow
                 # pooling requests to be chunked
-                if not self.scheduler_config.enable_chunked_prefill and \
-                        num_new_tokens > token_budget:
+                if not self.scheduler_config.enable_chunked_prefill and num_new_tokens > token_budget:
                     self.waiting.pop_request()
                     skipped_waiting_requests.prepend_request(request)
                     continue
@@ -417,11 +409,11 @@
 
                 # Schedule encoder inputs.
                 if request.has_encoder_inputs:
-                    (encoder_inputs_to_schedule, num_new_tokens,
-                     new_encoder_compute_budget,
-                     _) = self._try_schedule_encoder_inputs(
-                         request, num_computed_tokens, num_new_tokens,
-                         encoder_compute_budget)
+                    (encoder_inputs_to_schedule, num_new_tokens, new_encoder_compute_budget, _) = (
+                        self._try_schedule_encoder_inputs(
+                            request, num_computed_tokens, num_new_tokens, encoder_compute_budget
+                        )
+                    )
                     if num_new_tokens == 0:
                         # The request cannot be scheduled.
                         break
@@ -431,9 +423,7 @@
                 # extra block gets allocated which
                 # creates a mismatch between the number
                 # of local and remote blocks.
-                effective_lookahead_tokens = (0 if request.num_computed_tokens
-                                              == 0 else
-                                              self.num_lookahead_tokens)
+                effective_lookahead_tokens = 0 if request.num_computed_tokens == 0 else self.num_lookahead_tokens
 
                 # Determine if we need to allocate cross-attention blocks.
                 if self.is_encoder_decoder and request.has_encoder_inputs:
@@ -441,8 +431,7 @@
                     # always padded to the maximum length. If we support other
                     # encoder-decoder models, this will need to be updated if we
                     # want to only allocate what is needed.
-                    num_encoder_tokens =\
-                        self.scheduler_config.max_num_encoder_input_tokens
+                    num_encoder_tokens = self.scheduler_config.max_num_encoder_input_tokens
                 else:
                     num_encoder_tokens = 0
@@ -484,20 +473,17 @@
                 req_index += 1
                 self.running.append(request)
                 if self.log_stats:
-                    request.record_event(EngineCoreEventType.SCHEDULED,
-                                         scheduled_timestamp)
+                    request.record_event(EngineCoreEventType.SCHEDULED, scheduled_timestamp)
                 if request.status == RequestStatus.WAITING:
                     scheduled_new_reqs.append(request)
                 elif request.status == RequestStatus.PREEMPTED:
                     scheduled_resumed_reqs.append(request)
                 else:
-                    raise RuntimeError(
-                        f"Invalid request status: {request.status}")
+                    raise RuntimeError(f"Invalid request status: {request.status}")
 
                 if self.lora_config and request.lora_request:
                     scheduled_loras.add(request.lora_request.lora_int_id)
-                req_to_new_blocks[request.request_id] = (
-                    self.kv_cache_manager.get_blocks(request.request_id))
+                req_to_new_blocks[request.request_id] = self.kv_cache_manager.get_blocks(request.request_id)
                 num_scheduled_tokens[request.request_id] = num_new_tokens
                 token_budget -= num_new_tokens
                 request.status = RequestStatus.RUNNING
@@ -507,8 +493,7 @@
                 request.num_cached_tokens = num_computed_tokens
                 # Encoder-related.
                 if encoder_inputs_to_schedule:
-                    scheduled_encoder_inputs[request.request_id] = (
-                        encoder_inputs_to_schedule)
+                    scheduled_encoder_inputs[request.request_id] = encoder_inputs_to_schedule
                     # Allocate the encoder cache.
                     for i in encoder_inputs_to_schedule:
                         self.encoder_cache_manager.allocate(request, i)
@@ -526,22 +511,17 @@
         # Since some requests in the RUNNING queue may not be scheduled in
         # this step, the total number of scheduled requests can be smaller than
        # len(self.running).
-        assert (len(scheduled_new_reqs) + len(scheduled_resumed_reqs) +
-                len(scheduled_running_reqs) <= len(self.running))
+        assert len(scheduled_new_reqs) + len(scheduled_resumed_reqs) + len(scheduled_running_reqs) <= len(self.running)
         # Get the longest common prefix among all requests in the running queue.
         # This can be potentially used for cascade attention.
-        num_common_prefix_blocks = [0] * len(
-            self.kv_cache_config.kv_cache_groups)
+        num_common_prefix_blocks = [0] * len(self.kv_cache_config.kv_cache_groups)
         if self.running:
             any_request = self.running[0]
-            num_common_prefix_blocks = (
-                self.kv_cache_manager.get_num_common_prefix_blocks(
-                    any_request.request_id))
+            num_common_prefix_blocks = self.kv_cache_manager.get_num_common_prefix_blocks(any_request.request_id)
 
         # Construct the scheduler output.
         new_reqs_data = [
-            NewRequestData.from_request(
-                req, req_to_new_blocks[req.request_id].get_block_ids())
+            NewRequestData.from_request(req, req_to_new_blocks[req.request_id].get_block_ids())
             for req in scheduled_new_reqs
         ]
         cached_reqs_data = self._make_cached_request_data(
@@ -564,8 +544,7 @@
             # It contains the request IDs that are finished in between
             # the previous and the current steps.
             finished_req_ids=self.finished_req_ids,
-            free_encoder_mm_hashes=self.encoder_cache_manager.
-            get_freed_mm_hashes(),
+            free_encoder_mm_hashes=self.encoder_cache_manager.get_freed_mm_hashes(),
         )
 
     # NOTE(Kuntai): this function is designed for multiple purposes: