### What this PR does / why we need it?
| File Path |
| :--- |
| `vllm_ascend/eplb/adaptor/abstract_adaptor.py` |
| `vllm_ascend/eplb/adaptor/vllm_adaptor.py` |
| `vllm_ascend/eplb/core/eplb_device_transfer_loader.py` |
| `vllm_ascend/eplb/core/eplb_utils.py` |
| `vllm_ascend/eplb/core/eplb_worker.py` |
| `vllm_ascend/eplb/core/policy/policy_abstract.py` |
| `vllm_ascend/eplb/core/policy/policy_default_eplb.py` |
| `vllm_ascend/eplb/core/policy/policy_factory.py` |
| `vllm_ascend/eplb/core/policy/policy_flashlb.py` |
| `vllm_ascend/eplb/core/policy/policy_random.py` |
| `vllm_ascend/eplb/core/policy/policy_swift_balancer.py` |
| `vllm_ascend/eplb/eplb_updator.py` |
| `vllm_ascend/eplb/utils.py` |
| `vllm_ascend/model_loader/netloader/executor/elastic_load.py` |
| `vllm_ascend/model_loader/netloader/executor/netloader_pg.py` |
| `vllm_ascend/model_loader/netloader/interaction/elastic.py` |
| `vllm_ascend/model_loader/netloader/load.py` |
| `vllm_ascend/model_loader/netloader/netloader.py` |
| `vllm_ascend/model_loader/netloader/utils.py` |
| `vllm_ascend/patch/platform/__init__.py` |
| `vllm_ascend/patch/platform/patch_balance_schedule.py` |
| `vllm_ascend/patch/platform/patch_ec_connector.py` |
| `vllm_ascend/patch/platform/patch_mamba_config.py` |
| `vllm_ascend/patch/platform/patch_multiproc_executor.py` |
| `vllm_ascend/patch/platform/patch_sched_yield.py` |
- vLLM version: v0.13.0
- vLLM main: 2c24bc6996
---------
Signed-off-by: MrZ20 <2609716663@qq.com>
@@ -23,9 +23,8 @@ import vllm_ascend.patch.platform.patch_sched_yield # noqa
from vllm_ascend import envs
from vllm_ascend.utils import vllm_version_is

if os.getenv("DYNAMIC_EPLB", "false").lower() in ("true", "1") or os.getenv(
"EXPERT_MAP_RECORD", "false") == "true":
if os.getenv("DYNAMIC_EPLB", "false").lower() in ("true", "1") or os.getenv("EXPERT_MAP_RECORD", "false") == "true":
import vllm_ascend.patch.platform.patch_multiproc_executor # noqa

if envs.VLLM_ASCEND_BALANCE_SCHEDULING and vllm_version_is('0.14.0'):
if envs.VLLM_ASCEND_BALANCE_SCHEDULING and vllm_version_is("0.14.0"):
import vllm_ascend.patch.platform.patch_balance_schedule # noqa
@@ -7,17 +7,14 @@ import torch.distributed as dist
import vllm
from vllm.config import ParallelConfig
from vllm.distributed.ec_transfer.ec_connector.base import ECConnectorMetadata
from vllm.distributed.kv_transfer.kv_connector.v1.base import \
KVConnectorMetadata
from vllm.distributed.kv_transfer.kv_connector.v1.base import KVConnectorMetadata
from vllm.logger import init_logger
from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry
from vllm.transformers_utils.config import \
maybe_register_config_serialize_by_value
from vllm.transformers_utils.config import maybe_register_config_serialize_by_value
from vllm.utils.system_utils import decorate_logs, set_process_title
from vllm.v1.core.kv_cache_manager import KVCacheBlocks
from vllm.v1.core.sched.output import NewRequestData, SchedulerOutput
from vllm.v1.core.sched.request_queue import (SchedulingPolicy,
create_request_queue)
from vllm.v1.core.sched.request_queue import SchedulingPolicy, create_request_queue
from vllm.v1.core.sched.scheduler import Scheduler
from vllm.v1.engine import EngineCoreEventType, EngineCoreOutputs
from vllm.v1.engine.core import DPEngineCoreProc, EngineCoreProc
@@ -30,7 +27,6 @@ logger = init_logger(__name__)


class BalanceScheduler(Scheduler):

def __init__(
self,
vllm_config,
@@ -41,9 +37,15 @@ class BalanceScheduler(Scheduler):
include_finished_set: bool = False,
log_stats: bool = False,
) -> None:
super().__init__(vllm_config, kv_cache_config,
structured_output_manager, block_size, mm_registry,
include_finished_set, log_stats)
super().__init__(
vllm_config,
kv_cache_config,
structured_output_manager,
block_size,
mm_registry,
include_finished_set,
log_stats,
)
# Balance scheduling.
self.balance_queue = [
torch.tensor([0], dtype=torch.int, device="cpu")
@@ -51,9 +53,7 @@ class BalanceScheduler(Scheduler):
]

def balance_gather(self, dp_group):
running_tensor = torch.tensor([len(self.running)],
dtype=torch.int,
device="cpu")
running_tensor = torch.tensor([len(self.running)], dtype=torch.int, device="cpu")
dist.all_gather(self.balance_queue, running_tensor, group=dp_group)

def schedule(self) -> SchedulerOutput:
@@ -89,33 +89,32 @@ class BalanceScheduler(Scheduler):
while req_index < len(self.running) and token_budget > 0:
request = self.running[req_index]

if (request.num_output_placeholders > 0
# This is (num_computed_tokens + 1) - (num_output_placeholders - 1).
# Since output placeholders are also included in the computed tokens
# count, we subtract (num_output_placeholders - 1) to remove any draft
# tokens, so that we can be sure no further steps are needed even if
# they are all rejected.
and request.num_computed_tokens + 2 -
request.num_output_placeholders
>= request.num_prompt_tokens + request.max_tokens):
if (
request.num_output_placeholders > 0
# This is (num_computed_tokens + 1) - (num_output_placeholders - 1).
# Since output placeholders are also included in the computed tokens
# count, we subtract (num_output_placeholders - 1) to remove any draft
# tokens, so that we can be sure no further steps are needed even if
# they are all rejected.
and request.num_computed_tokens + 2 - request.num_output_placeholders
>= request.num_prompt_tokens + request.max_tokens
):
# Async scheduling: Avoid scheduling an extra step when we are sure that
# the previous step has reached request.max_tokens. We don't schedule
# partial draft tokens since this prevents uniform decode optimizations.
req_index += 1
continue

num_new_tokens = (request.num_tokens_with_spec +
request.num_output_placeholders -
request.num_computed_tokens)
num_new_tokens = (
request.num_tokens_with_spec + request.num_output_placeholders - request.num_computed_tokens
)
if 0 < self.scheduler_config.long_prefill_token_threshold < num_new_tokens:
num_new_tokens = self.scheduler_config.long_prefill_token_threshold
num_new_tokens = min(num_new_tokens, token_budget)

# Make sure the input position does not exceed the max model len.
# This is necessary when using spec decoding.
num_new_tokens = min(
num_new_tokens,
self.max_model_len - 1 - request.num_computed_tokens)
num_new_tokens = min(num_new_tokens, self.max_model_len - 1 - request.num_computed_tokens)

# Schedule encoder inputs.
encoder_inputs_to_schedule = None
@@ -174,20 +173,17 @@ class BalanceScheduler(Scheduler):
self.running.remove(preempted_req)
if preempted_req in scheduled_running_reqs:
scheduled_running_reqs.remove(preempted_req)
token_budget += num_scheduled_tokens[
preempted_req.request_id]
token_budget += num_scheduled_tokens[preempted_req.request_id]
req_to_new_blocks.pop(preempted_req.request_id)
num_scheduled_tokens.pop(preempted_req.request_id)
scheduled_spec_decode_tokens.pop(
preempted_req.request_id, None)
preempted_encoder_inputs = scheduled_encoder_inputs.pop(
preempted_req.request_id, None)
scheduled_spec_decode_tokens.pop(preempted_req.request_id, None)
preempted_encoder_inputs = scheduled_encoder_inputs.pop(preempted_req.request_id, None)
if preempted_encoder_inputs:
# Restore encoder compute budget if the preempted
# request had encoder inputs scheduled in this step.
num_embeds_to_restore = sum(
preempted_req.get_num_encoder_embeds(i)
for i in preempted_encoder_inputs)
preempted_req.get_num_encoder_embeds(i) for i in preempted_encoder_inputs
)
encoder_compute_budget += num_embeds_to_restore
req_index -= 1
else:
@@ -212,23 +208,20 @@ class BalanceScheduler(Scheduler):

# Speculative decode related.
if request.spec_token_ids:
num_scheduled_spec_tokens = (num_new_tokens +
request.num_computed_tokens -
request.num_tokens -
request.num_output_placeholders)
num_scheduled_spec_tokens = (
num_new_tokens + request.num_computed_tokens - request.num_tokens - request.num_output_placeholders
)
if num_scheduled_spec_tokens > 0:
# Trim spec_token_ids list to num_scheduled_spec_tokens.
del request.spec_token_ids[num_scheduled_spec_tokens:]
scheduled_spec_decode_tokens[request.request_id] = (
request.spec_token_ids)
scheduled_spec_decode_tokens[request.request_id] = request.spec_token_ids
# New spec tokens will be set in `update_draft_token_ids` before the
# next step when applicable.
request.spec_token_ids = []

# Encoder-related.
if encoder_inputs_to_schedule:
scheduled_encoder_inputs[request.request_id] = (
encoder_inputs_to_schedule)
scheduled_encoder_inputs[request.request_id] = encoder_inputs_to_schedule
# Allocate the encoder cache.
for i in encoder_inputs_to_schedule:
self.encoder_cache_manager.allocate(request, i)
@@ -243,8 +236,10 @@ class BalanceScheduler(Scheduler):
scheduled_loras: set[int] = set()
if self.lora_config:
scheduled_loras = set(
req.lora_request.lora_int_id for req in scheduled_running_reqs
if req.lora_request and req.lora_request.lora_int_id > 0)
req.lora_request.lora_int_id
for req in scheduled_running_reqs
if req.lora_request and req.lora_request.lora_int_id > 0
)
assert len(scheduled_loras) <= self.lora_config.max_loras

# Use a temporary RequestQueue to collect requests that need to be
@@ -257,9 +252,7 @@ class BalanceScheduler(Scheduler):
if len(self.running) == self.max_num_running_reqs:
break

balance_flag = (max(
t.item()
for t in self.balance_queue) == self.max_num_running_reqs)
balance_flag = max(t.item() for t in self.balance_queue) == self.max_num_running_reqs
if balance_flag:
break
@@ -292,9 +285,14 @@ class BalanceScheduler(Scheduler):

# Check that adding the request still respects the max_loras
# constraint.
if (self.lora_config and request.lora_request and
(len(scheduled_loras) == self.lora_config.max_loras and
request.lora_request.lora_int_id not in scheduled_loras)):
if (
self.lora_config
and request.lora_request
and (
len(scheduled_loras) == self.lora_config.max_loras
and request.lora_request.lora_int_id not in scheduled_loras
)
):
# Scheduling would exceed max_loras, skip.
self.waiting.pop_request()
skipped_waiting_requests.prepend_request(request)
@@ -306,14 +304,15 @@ class BalanceScheduler(Scheduler):
# Get already-cached tokens.
if request.num_computed_tokens == 0:
# Get locally-cached tokens.
new_computed_blocks, num_new_local_computed_tokens = (
self.kv_cache_manager.get_computed_blocks(request))
new_computed_blocks, num_new_local_computed_tokens = self.kv_cache_manager.get_computed_blocks(
request
)

# Get externally-cached tokens if using a KVConnector.
if self.connector is not None:
ext_tokens, load_kv_async = (
self.connector.get_num_new_matched_tokens(
request, num_new_local_computed_tokens))
ext_tokens, load_kv_async = self.connector.get_num_new_matched_tokens(
request, num_new_local_computed_tokens
)

if ext_tokens is None:
# The request cannot be scheduled because
@@ -327,8 +326,7 @@ class BalanceScheduler(Scheduler):
num_external_computed_tokens = ext_tokens

# Total computed tokens (local + external).
num_computed_tokens = (num_new_local_computed_tokens +
num_external_computed_tokens)
num_computed_tokens = num_new_local_computed_tokens + num_external_computed_tokens
else:
# KVTransfer: WAITING reqs have num_computed_tokens > 0
# after async KV recvs are completed.
@@ -356,8 +354,7 @@ class BalanceScheduler(Scheduler):

# chunked prefill has to be enabled explicitly to allow
# pooling requests to be chunked
if (not self.scheduler_config.enable_chunked_prefill
and num_new_tokens > token_budget):
if not self.scheduler_config.enable_chunked_prefill and num_new_tokens > token_budget:
# If chunked_prefill is disabled,
# we can stop the scheduling here.
break
@@ -388,9 +385,7 @@ class BalanceScheduler(Scheduler):
# extra block gets allocated which
# creates a mismatch between the number
# of local and remote blocks.
effective_lookahead_tokens = (0 if request.num_computed_tokens
== 0 else
self.num_lookahead_tokens)
effective_lookahead_tokens = 0 if request.num_computed_tokens == 0 else self.num_lookahead_tokens

# Determine if we need to allocate cross-attention blocks.
if self.is_encoder_decoder and request.has_encoder_inputs:
@@ -398,8 +393,7 @@ class BalanceScheduler(Scheduler):
# always padded to the maximum length. If we support other
# encoder-decoder models, this will need to be updated if we
# want to only allocate what is needed.
num_encoder_tokens = (
self.scheduler_config.max_num_encoder_input_tokens)
num_encoder_tokens = self.scheduler_config.max_num_encoder_input_tokens
else:
num_encoder_tokens = 0
@@ -442,20 +436,17 @@ class BalanceScheduler(Scheduler):

self.running.append(request)
if self.log_stats:
request.record_event(EngineCoreEventType.SCHEDULED,
scheduled_timestamp)
request.record_event(EngineCoreEventType.SCHEDULED, scheduled_timestamp)
if request.status == RequestStatus.WAITING:
scheduled_new_reqs.append(request)
elif request.status == RequestStatus.PREEMPTED:
scheduled_resumed_reqs.append(request)
else:
raise RuntimeError(
f"Invalid request status: {request.status}")
raise RuntimeError(f"Invalid request status: {request.status}")

if self.lora_config and request.lora_request:
scheduled_loras.add(request.lora_request.lora_int_id)
req_to_new_blocks[request.request_id] = (
self.kv_cache_manager.get_blocks(request.request_id))
req_to_new_blocks[request.request_id] = self.kv_cache_manager.get_blocks(request.request_id)
num_scheduled_tokens[request.request_id] = num_new_tokens
token_budget -= num_new_tokens
request.status = RequestStatus.RUNNING
@@ -465,8 +456,7 @@ class BalanceScheduler(Scheduler):
request.num_cached_tokens = num_computed_tokens
# Encoder-related.
if encoder_inputs_to_schedule:
scheduled_encoder_inputs[request.request_id] = (
encoder_inputs_to_schedule)
scheduled_encoder_inputs[request.request_id] = encoder_inputs_to_schedule
# Allocate the encoder cache.
for i in encoder_inputs_to_schedule:
self.encoder_cache_manager.allocate(request, i)
@@ -476,8 +466,7 @@ class BalanceScheduler(Scheduler):
for i in external_load_encoder_input:
self.encoder_cache_manager.allocate(request, i)
if self.ec_connector is not None:
self.ec_connector.update_state_after_alloc(
request, i)
self.ec_connector.update_state_after_alloc(request, i)
# Put back any skipped requests at the head of the waiting queue
if skipped_waiting_requests:
self.waiting.prepend_requests(skipped_waiting_requests)
@@ -491,20 +480,15 @@ class BalanceScheduler(Scheduler):
# Since some requests in the RUNNING queue may not be scheduled in
# this step, the total number of scheduled requests can be smaller than
# len(self.running).
assert len(scheduled_new_reqs) + len(scheduled_resumed_reqs) + len(
scheduled_running_reqs) <= len(self.running)
assert len(scheduled_new_reqs) + len(scheduled_resumed_reqs) + len(scheduled_running_reqs) <= len(self.running)

# Get the longest common prefix among all requests in the running queue.
# This can be potentially used for cascade attention.
num_common_prefix_blocks = [0] * len(
self.kv_cache_config.kv_cache_groups)
with record_function_or_nullcontext(
"schedule: get_num_common_prefix_blocks"):
num_common_prefix_blocks = [0] * len(self.kv_cache_config.kv_cache_groups)
with record_function_or_nullcontext("schedule: get_num_common_prefix_blocks"):
if self.running:
any_request = self.running[0]
num_common_prefix_blocks = (
self.kv_cache_manager.get_num_common_prefix_blocks(
any_request.request_id))
num_common_prefix_blocks = self.kv_cache_manager.get_num_common_prefix_blocks(any_request.request_id)

# Construct the scheduler output.
if self.use_v2_model_runner:
@@ -515,17 +499,16 @@ class BalanceScheduler(Scheduler):
req,
req_to_new_blocks[req.request_id].get_block_ids(),
req._all_token_ids,
) for req in scheduled_new_reqs
)
for req in scheduled_new_reqs
]
else:
new_reqs_data = [
NewRequestData.from_request(
req, req_to_new_blocks[req.request_id].get_block_ids())
NewRequestData.from_request(req, req_to_new_blocks[req.request_id].get_block_ids())
for req in scheduled_new_reqs
]

with record_function_or_nullcontext(
"schedule: make_cached_request_data"):
with record_function_or_nullcontext("schedule: make_cached_request_data"):
cached_reqs_data = self._make_cached_request_data(
scheduled_running_reqs,
scheduled_resumed_reqs,
@@ -546,15 +529,13 @@ class BalanceScheduler(Scheduler):
scheduled_spec_decode_tokens=scheduled_spec_decode_tokens,
scheduled_encoder_inputs=scheduled_encoder_inputs,
num_common_prefix_blocks=num_common_prefix_blocks,
preempted_req_ids={req.request_id
for req in preempted_reqs},
preempted_req_ids={req.request_id for req in preempted_reqs},
# finished_req_ids is an existing state in the scheduler,
# instead of being newly scheduled in this step.
# It contains the request IDs that are finished in between
# the previous and the current steps.
finished_req_ids=self.finished_req_ids,
free_encoder_mm_hashes=self.encoder_cache_manager.
get_freed_mm_hashes(),
free_encoder_mm_hashes=self.encoder_cache_manager.get_freed_mm_hashes(),
)

# NOTE(Kuntai): this function is designed for multiple purposes:
@@ -562,14 +543,12 @@ class BalanceScheduler(Scheduler):
# 2. Wrap up all the KV cache load / save ops into an opaque object
# 3. Clear the internal states of the connector
if self.connector is not None:
meta: KVConnectorMetadata = self.connector.build_connector_meta(
scheduler_output)
meta: KVConnectorMetadata = self.connector.build_connector_meta(scheduler_output)
scheduler_output.kv_connector_metadata = meta

# Build the connector meta for ECConnector
if self.ec_connector is not None:
ec_meta: ECConnectorMetadata = self.ec_connector.build_connector_meta(
scheduler_output)
ec_meta: ECConnectorMetadata = self.ec_connector.build_connector_meta(scheduler_output)
scheduler_output.ec_connector_metadata = ec_meta

with record_function_or_nullcontext("schedule: update_after_schedule"):
@@ -578,7 +557,6 @@ class BalanceScheduler(Scheduler):


class BalanceDPEngineCoreProc(DPEngineCoreProc):

def run_busy_loop(self):
"""Core busy loop of the EngineCore for data parallel case."""
@@ -602,23 +580,23 @@ class BalanceDPEngineCoreProc(DPEngineCoreProc):
self.execute_dummy_batch()

# 3) All-reduce operation to determine global unfinished reqs.
self.engines_running = self._has_global_unfinished_reqs(
local_unfinished_reqs)
self.engines_running = self._has_global_unfinished_reqs(local_unfinished_reqs)
self.scheduler.balance_gather(self.dp_group)

if not self.engines_running:
if self.dp_rank == 0 or not self.has_coordinator:
# Notify client that we are pausing the loop.
logger.debug("Wave %d finished, pausing engine loop.",
self.current_wave)
logger.debug("Wave %d finished, pausing engine loop.", self.current_wave)
# In the coordinator case, dp rank 0 sends updates to the
# coordinator. Otherwise (offline spmd case), each rank
# sends the update to its colocated front-end process.
client_index = -1 if self.has_coordinator else 0
self.output_queue.put_nowait((
client_index,
EngineCoreOutputs(wave_complete=self.current_wave),
))
self.output_queue.put_nowait(
(
client_index,
EngineCoreOutputs(wave_complete=self.current_wave),
)
)
# Increment wave count and reset step counter.
self.current_wave += 1
self.step_counter = 0
@@ -1,21 +1,21 @@
import vllm.distributed.ec_transfer.ec_connector.example_connector
from safetensors.torch import load_file
from vllm.distributed.ec_transfer.ec_connector.example_connector import (
ECConnectorMetadata, ECExampleConnector)
from vllm.distributed.ec_transfer.ec_connector.example_connector import ECConnectorMetadata, ECExampleConnector
from vllm.logger import logger


class AscendECExampleConnector(ECExampleConnector):

def start_load_caches(self, encoder_cache, **kwargs) -> None:
metadata: ECConnectorMetadata = self._get_connector_metadata()
assert isinstance(metadata, ECConnectorMetadata)
assert encoder_cache is not None
if metadata is None:
logger.warning((
"In connector.start_load_caches, ",
"but the connector metadata is None",
))
logger.warning(
(
"In connector.start_load_caches, ",
"but the connector metadata is None",
)
)
return
# Load the EC for each mm data
for mm_data in metadata.mm_datas:
@@ -24,8 +24,7 @@ class AscendECExampleConnector(ECExampleConnector):
filename = self._generate_filename_debug(mm_data.mm_hash)
ec_cache = load_file(filename)["ec_cache"].npu()
encoder_cache[mm_data.mm_hash] = ec_cache
logger.debug("Success load encoder cache for hash %s",
mm_data.mm_hash)
logger.debug("Success load encoder cache for hash %s", mm_data.mm_hash)


vllm.distributed.ec_transfer.ec_connector.example_connector.ECExampleConnector = AscendECExampleConnector
@@ -38,7 +38,8 @@ def verify_and_update_config(cls, vllm_config) -> None:
block_size=1,
num_kv_heads=model_config.get_num_kv_heads(parallel_config),
head_size=model_config.get_head_size(),
dtype=kv_cache_dtype).page_size_bytes
dtype=kv_cache_dtype,
).page_size_bytes

model_cls, _ = ModelRegistry.resolve_model_cls(
model_config.architecture,
@@ -58,23 +59,20 @@ def verify_and_update_config(cls, vllm_config) -> None:
# block size to multiple of 16, so let's suggest a value
# that would work (note: FA is currently not compatible
# with mamba layers, use FlashInfer instead).
attn_block_size = block_alignment_bytes * cdiv(
mamba_page_size, block_alignment_bytes * attn_page_size_1_token)
attn_block_size = block_alignment_bytes * cdiv(mamba_page_size, block_alignment_bytes * attn_page_size_1_token)

# override attention block size if either (a) the
# user has not set it or (b) the user has set it
# too small.
if (cache_config.block_size is None
or cache_config.block_size < attn_block_size):
if cache_config.block_size is None or cache_config.block_size < attn_block_size:
cache_config.block_size = attn_block_size
logger.info(
"Setting attention block size to %d tokens "
"to ensure that attention page size is >= mamba page size.",
attn_block_size)
"Setting attention block size to %d tokens to ensure that attention page size is >= mamba page size.",
attn_block_size,
)

# compute new attention page size
attn_page_size = \
cache_config.block_size * attn_page_size_1_token
attn_page_size = cache_config.block_size * attn_page_size_1_token

assert attn_page_size >= mamba_page_size

@@ -83,15 +81,15 @@ def verify_and_update_config(cls, vllm_config) -> None:
return

# pad mamba page size to exactly match attention
if (cache_config.mamba_page_size_padded is None
or cache_config.mamba_page_size_padded != attn_page_size):
cache_config.mamba_page_size_padded = (attn_page_size)
mamba_padding_pct = 100 * (attn_page_size -
mamba_page_size) / mamba_page_size
if cache_config.mamba_page_size_padded is None or cache_config.mamba_page_size_padded != attn_page_size:
cache_config.mamba_page_size_padded = attn_page_size
mamba_padding_pct = 100 * (attn_page_size - mamba_page_size) / mamba_page_size
logger.info(
"Padding mamba page size by %.2f%% to ensure "
"that mamba page size and attention page size are "
"exactly equal.", mamba_padding_pct)
"exactly equal.",
mamba_padding_pct,
)


vllm.model_executor.models.config.HybridAttentionMambaModelConfig.verify_and_update_config = verify_and_update_config
@@ -7,19 +7,20 @@ from multiprocessing.synchronize import Lock as LockType
import vllm.v1.executor.multiproc_executor
from vllm import envs
from vllm.config import VllmConfig
from vllm.distributed.device_communicators.shm_broadcast import (Handle,
MessageQueue)
from vllm.utils.network_utils import (get_distributed_init_method,
get_loopback_ip, get_open_port)
from vllm.distributed.device_communicators.shm_broadcast import Handle, MessageQueue
from vllm.utils.network_utils import get_distributed_init_method, get_loopback_ip, get_open_port
from vllm.utils.system_utils import get_mp_context
from vllm.v1.executor.abstract import FailureCallback
from vllm.v1.executor.multiproc_executor import (
FutureWrapper, MultiprocExecutor, UnreadyWorkerProcHandle, WorkerProc,
set_multiprocessing_worker_envs)
FutureWrapper,
MultiprocExecutor,
UnreadyWorkerProcHandle,
WorkerProc,
set_multiprocessing_worker_envs,
)


class AscendMultiprocExecutor(MultiprocExecutor):

def _init_executor(self) -> None:
# Call self.shutdown at exit to clean up
# and ensure workers will be terminated.
@@ -32,7 +33,8 @@ class AscendMultiprocExecutor(MultiprocExecutor):
assert self.world_size % self.parallel_config.nnodes_within_dp == 0, (
f"global world_size ({self.parallel_config.world_size}) must be "
f"divisible by nnodes_within_dp "
f"({self.parallel_config.nnodes_within_dp}). ")
f"({self.parallel_config.nnodes_within_dp}). "
)
self.local_world_size = self.parallel_config.local_world_size
tensor_parallel_size = self.parallel_config.tensor_parallel_size
pp_parallel_size = self.parallel_config.pipeline_parallel_size
@@ -41,7 +43,8 @@ class AscendMultiprocExecutor(MultiprocExecutor):
f"world_size ({self.world_size}) must be equal to the "
f"tensor_parallel_size ({tensor_parallel_size}) x pipeline"
f"_parallel_size ({pp_parallel_size}) x prefill_context"
f"_parallel_size ({pcp_parallel_size}). ")
f"_parallel_size ({pcp_parallel_size}). "
)

# Set multiprocessing envs
set_multiprocessing_worker_envs()
@@ -49,8 +52,7 @@ class AscendMultiprocExecutor(MultiprocExecutor):
# Multiprocessing-based executor does not support multi-node setting.
# Since it only works for single node, we can use the loopback address
# get_loopback_ip() for communication.
distributed_init_method = get_distributed_init_method(
get_loopback_ip(), get_open_port())
distributed_init_method = get_distributed_init_method(get_loopback_ip(), get_open_port())
self.rpc_broadcast_mq: MessageQueue | None = None
scheduler_output_handle: Handle | None = None
# Initialize worker and set up message queues for SchedulerOutputs
@@ -72,8 +74,7 @@ class AscendMultiprocExecutor(MultiprocExecutor):
unready_workers: list[UnreadyWorkerProcHandle] = []
success = False
try:
global_start_rank = (self.local_world_size *
self.parallel_config.node_rank_within_dp)
global_start_rank = self.local_world_size * self.parallel_config.node_rank_within_dp
for local_rank in range(self.local_world_size):
global_rank = global_start_rank + local_rank
unready_workers.append(
@@ -84,7 +85,8 @@ class AscendMultiprocExecutor(MultiprocExecutor):
distributed_init_method=distributed_init_method,
input_shm_handle=scheduler_output_handle,
shared_worker_lock=shared_worker_lock,
))
)
)

# Workers must be created before wait_for_ready to avoid
# deadlock, since worker.init_device() does a device sync.
@@ -101,13 +103,11 @@ class AscendMultiprocExecutor(MultiprocExecutor):
if self.parallel_config.node_rank_within_dp == 0:
for rank in range(self.world_size):
if rank < self.local_world_size:
local_message_queue = self.workers[
rank].worker_response_mq
local_message_queue = self.workers[rank].worker_response_mq
assert local_message_queue is not None
self.response_mqs.append(local_message_queue)
else:
remote_message_queue = self.workers[
0].peer_worker_response_mqs[rank]
remote_message_queue = self.workers[0].peer_worker_response_mqs[rank]
assert remote_message_queue is not None
self.response_mqs.append(remote_message_queue)

@@ -128,8 +128,7 @@ class AscendMultiprocExecutor(MultiprocExecutor):
for uw in unready_workers:
if uw.death_writer is not None:
uw.death_writer.close()
self._ensure_worker_termination(
[uw.proc for uw in unready_workers])
self._ensure_worker_termination([uw.proc for uw in unready_workers])

self.futures_queue = deque[tuple[FutureWrapper, Callable]]()

@@ -137,7 +136,6 @@ class AscendMultiprocExecutor(MultiprocExecutor):


class AscendWorkerProc(WorkerProc):

@staticmethod
def make_worker_process(
vllm_config: VllmConfig,
@@ -3,11 +3,10 @@ import sys
import vllm.distributed.utils
from vllm.platforms import CpuArchEnum, Platform

is_arm = (Platform.get_cpu_architecture() == CpuArchEnum.ARM)
is_arm = Platform.get_cpu_architecture() == CpuArchEnum.ARM

USE_SCHED_YIELD = (
((sys.version_info[:3] >= (3, 11, 1)) or
(sys.version_info[:2] == (3, 10) and sys.version_info[2] >= 8))
and not is_arm)
(sys.version_info[:3] >= (3, 11, 1)) or (sys.version_info[:2] == (3, 10) and sys.version_info[2] >= 8)
) and not is_arm

vllm.distributed.utils.USE_SCHED_YIELD = USE_SCHED_YIELD