cleanup ascend config (#5296)
1. Refresh the additional config doc.
2. Move the KV config validation logic to the platform layer.
3. Improve the `dump_config` init logic and rename it to `dump_config_path`.
   This change is user-facing: `dump_config` changes from a dict to a string.
4. Correct the `enable_async_exponential` type (an int flag becomes a bool).
5. Remove the unused `chunked_prefill_for_mla` option.
- vLLM version: release/v0.13.0
- vLLM main: ad32e3e19c
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
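
For users migrating across this change, a minimal sketch of the new `additional_config` keys (the model name and the `LLM` entry point here are illustrative, not part of this diff):

from vllm import LLM

llm = LLM(
    model="your/model",   # illustrative
    enforce_eager=True,   # dumping/debugging only works in eager mode
    additional_config={
        # formerly "dump_config"; now a plain string path to an
        # msprobe config json
        "dump_config_path": "/path/to/msprobe_config.json",
        # formerly an int flag (0 or 1); now a bool
        "enable_async_exponential": True,
    },
)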
@@ -14,43 +14,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from typing import Optional
-from uuid import uuid4

 from vllm.logger import logger
 from vllm.triton_utils import HAS_TRITON


-def check_kv_extra_config(vllm_config):
-
-    def _check(name: str, config: dict):
-        tp_key = "tp_size"
-        dp_key = "dp_size"
-        if tp_key in config:
-            config_tp = config[tp_key]
-            vllm_tp = vllm_config.parallel_config.tensor_parallel_size
-            if config_tp != vllm_tp:
-                raise ValueError(
-                    f"KV transfer '{name}' config has a conflicting tensor parallel size. "
-                    f"Expected {vllm_tp}, but got {config_tp}.")
-        if dp_key in config:
-            config_dp = config[dp_key]
-            vllm_dp = vllm_config.parallel_config.data_parallel_size
-            if config_dp != vllm_dp:
-                raise ValueError(
-                    f"KV transfer '{name}' config has a conflicting data parallel size. "
-                    f"Expected {vllm_dp}, but got {config_dp}.")
-
-    if vllm_config.kv_transfer_config.is_kv_producer:
-        _check(
-            "prefill",
-            vllm_config.kv_transfer_config.get_from_extra_config(
-                "prefill", {}))
-    if vllm_config.kv_transfer_config.is_kv_consumer:
-        _check(
-            "decode",
-            vllm_config.kv_transfer_config.get_from_extra_config("decode", {}))
-
-
 class AscendConfig:
     """
     Configuration Object for additional_config from vllm.configs.
@@ -74,8 +42,7 @@ class AscendConfig:
             finegrained_tp_config, vllm_config)

         # Dump / PrecisionDebugger configuration
-        dump_config_path = additional_config.get("dump_config", None)
-        self.dump_config = DumpConfig(dump_config_path)
+        self.dump_config_path = additional_config.get("dump_config_path", None)

         weight_prefetch_config = additional_config.get(
             "weight_prefetch_config", {})
@@ -96,8 +63,6 @@ class AscendConfig:
         self.gate_eplb = additional_config.get("gate_eplb", False)
         self.num_wait_worker_iterations = additional_config.get(
             "num_wait_worker_iterations", 30)
-        self.chunked_prefill_for_mla = additional_config.get(
-            "chunked_prefill_for_mla", False)
         self.enable_shared_expert_dp = additional_config.get(
             "enable_shared_expert_dp",
             False) and vllm_config.parallel_config.enable_expert_parallel
@@ -114,9 +79,6 @@ class AscendConfig:
         self.enable_cpu_binding = additional_config.get(
             "enable_cpu_binding", False)

-        if vllm_config.kv_transfer_config is not None:
-            check_kv_extra_config(vllm_config)
-
         self.pd_tp_ratio = 1
         self.pd_head_ratio = 1
         self.num_head_replica = 1
@@ -156,16 +118,8 @@ class AscendConfig:
         # npu_fused_infer_attention_score performs better on all scenarios.
         self.pa_shape_list = additional_config.get("pa_shape_list", [])

-        kv_cfg = vllm_config.kv_transfer_config
-        if kv_cfg is not None and not getattr(kv_cfg, "_engine_id_patched",
-                                              False):
-            kv_cfg.engine_id = f"{kv_cfg.engine_id}-{uuid4().hex}"
-            kv_cfg._engine_id_patched = True
-        self.enable_async_exponential = additional_config.get(
-            "enable_async_exponential", 0)
-        if self.enable_async_exponential not in (0, 1):
-            raise AssertionError(
-                "Enable async exponential can only be set to 0 or 1.")
+        self.enable_async_exponential = bool(
+            additional_config.get("enable_async_exponential", False))


 class FinegrainedTPConfig:
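
The bool coercion above also accepts the old integer flag, so `bool()` quietly supersedes the removed 0/1 assertion; a quick sketch of the parsing behavior (standalone, with a hypothetical `parse_flag` helper):

def parse_flag(additional_config: dict) -> bool:
    # bool() maps 0/False to False and 1/True to True, replacing the
    # old "can only be set to 0 or 1" AssertionError.
    return bool(additional_config.get("enable_async_exponential", False))

assert parse_flag({}) is False
assert parse_flag({"enable_async_exponential": 0}) is False
assert parse_flag({"enable_async_exponential": 1}) is True
assert parse_flag({"enable_async_exponential": True}) is True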
@@ -274,18 +228,6 @@ class XliteGraphConfig:
         )


-class DumpConfig:
-    """
-    Configuration object for dump/PrecisionDebugger settings.
-    """
-
-    def __init__(self, dump_config_path: Optional[str] = None):
-        # enable_dump is True when dump_cfg exists and config_path is not empty
-        self.enable_dump: bool = bool(dump_config_path)
-        # Path to msprobe config json; may be None.
-        self.config_path: Optional[str] = dump_config_path
-
-
 class WeightPrefetchConfig:
     """
     Configuration Object for weight_prefetch_config from additional_config
@@ -18,6 +18,7 @@
 import gc
 import os
 from typing import TYPE_CHECKING, Optional, Tuple
+from uuid import uuid4

 import torch
 from vllm.logger import logger
@@ -30,12 +31,11 @@ from vllm_ascend.ascend_config import init_ascend_config
 from vllm_ascend.utils import refresh_block_size

 # isort: off
-from vllm_ascend.utils import (ASCEND_QUANTIZATION_METHOD,
-                               COMPRESSED_TENSORS_METHOD, AscendDeviceType,
-                               enable_sp, get_ascend_device_type, is_vl_model,
-                               update_aclgraph_sizes,
-                               update_cudagraph_capture_sizes,
-                               update_default_aclgraph_sizes)
+from vllm_ascend.utils import (
+    ASCEND_QUANTIZATION_METHOD, COMPRESSED_TENSORS_METHOD, AscendDeviceType,
+    enable_sp, get_ascend_device_type, is_vl_model, update_aclgraph_sizes,
+    update_cudagraph_capture_sizes, update_default_aclgraph_sizes,
+    check_kv_extra_config)

 if TYPE_CHECKING:
     from vllm.config import ModelConfig, VllmConfig
@@ -152,6 +152,12 @@ class NPUPlatform(Platform):
         # initialize ascend config from vllm additional_config
         ascend_config = init_ascend_config(vllm_config)

+        if vllm_config.kv_transfer_config is not None:
+            check_kv_extra_config(vllm_config)
+            if not getattr(vllm_config.kv_transfer_config,
+                           "_engine_id_patched", False):
+                vllm_config.kv_transfer_config.engine_id = f"{vllm_config.kv_transfer_config.engine_id}-{uuid4().hex}"
+                vllm_config.kv_transfer_config._engine_id_patched = True
         from vllm.config import CompilationMode  # noqa: E402

         compilation_config = vllm_config.compilation_config
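
The block added above also carries the engine-id patch over from AscendConfig. The guard pattern is simple enough to isolate; a standalone sketch with a hypothetical `KVConfig` stand-in for `kv_transfer_config`:

from dataclasses import dataclass
from uuid import uuid4

@dataclass
class KVConfig:
    # hypothetical stand-in; the real kv_transfer_config has many more fields
    engine_id: str = "engine-0"

def patch_engine_id(kv_cfg: KVConfig) -> None:
    # Suffix a random hex so engine ids stay unique across processes;
    # the sentinel attribute keeps a second call from suffixing again.
    if not getattr(kv_cfg, "_engine_id_patched", False):
        kv_cfg.engine_id = f"{kv_cfg.engine_id}-{uuid4().hex}"
        kv_cfg._engine_id_patched = True

cfg = KVConfig()
patch_engine_id(cfg)
once = cfg.engine_id
patch_engine_id(cfg)        # idempotent: no second suffix
assert cfg.engine_id == once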
@@ -83,7 +83,7 @@ class AscendTopKTopPSampler(TopKTopPSampler):
             logits_to_return = logits.log_softmax(dim=-1, dtype=torch.float32)

         probs = logits.softmax(dim=-1, dtype=torch.float32)
-        if get_ascend_config().enable_async_exponential == 1:
+        if get_ascend_config().enable_async_exponential:
             # Add synchronize to prevent synchronize error.
             self.async_event.synchronize()
         return probs.div_(self.q).argmax(dim=-1).view(-1), logits_to_return
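
For context on the unchanged `probs.div_(self.q).argmax(...)` line: it relies on the exponential-race trick. If the q_i are i.i.d. Exp(1), then argmax_i p_i/q_i is distributed as Categorical(p), because argmax p_i/q_i = argmin q_i/p_i and the minimum of independent Exp(p_i) clocks lands on index i with probability p_i. A self-contained sketch (independent of the Ascend sampler, which precomputes its `q` asynchronously):

import torch

def exponential_race_sample(probs: torch.Tensor) -> torch.Tensor:
    # q ~ Exp(1) elementwise; argmax(p / q) is a categorical sample from p.
    q = torch.empty_like(probs).exponential_(1.0)
    return probs.div(q).argmax(dim=-1)

probs = torch.tensor([[0.7, 0.2, 0.1]])
counts = torch.zeros(3)
for _ in range(10_000):
    counts[exponential_race_sample(probs)[0]] += 1
print(counts / counts.sum())   # approx tensor([0.7, 0.2, 0.1])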
@@ -1084,3 +1084,34 @@ def dispose_layer(layer: Any):
 def replace_layer(original_layer: Any, new_layer: Any):
     original_layer.__class__ = new_layer.__class__
     original_layer.__dict__ = new_layer.__dict__
+
+
+def check_kv_extra_config(vllm_config):
+
+    def _check(name: str, config: dict):
+        tp_key = "tp_size"
+        dp_key = "dp_size"
+        if tp_key in config:
+            config_tp = config[tp_key]
+            vllm_tp = vllm_config.parallel_config.tensor_parallel_size
+            if config_tp != vllm_tp:
+                raise ValueError(
+                    f"KV transfer '{name}' config has a conflicting tensor parallel size. "
+                    f"Expected {vllm_tp}, but got {config_tp}.")
+        if dp_key in config:
+            config_dp = config[dp_key]
+            vllm_dp = vllm_config.parallel_config.data_parallel_size
+            if config_dp != vllm_dp:
+                raise ValueError(
+                    f"KV transfer '{name}' config has a conflicting data parallel size. "
+                    f"Expected {vllm_dp}, but got {config_dp}.")
+
+    if vllm_config.kv_transfer_config.is_kv_producer:
+        _check(
+            "prefill",
+            vllm_config.kv_transfer_config.get_from_extra_config(
+                "prefill", {}))
+    if vllm_config.kv_transfer_config.is_kv_consumer:
+        _check(
+            "decode",
+            vllm_config.kv_transfer_config.get_from_extra_config("decode", {}))
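
A usage sketch for the relocated helper, using `types.SimpleNamespace` stand-ins that carry only the attributes `check_kv_extra_config` actually touches (the real `VllmConfig` and `KVTransferConfig` are much larger):

from types import SimpleNamespace

kv = SimpleNamespace(
    is_kv_producer=True,
    is_kv_consumer=False,
    # extra_config declares tp_size=4 for the prefill role
    get_from_extra_config=lambda key, default:
        {"tp_size": 4} if key == "prefill" else default,
)
vllm_config = SimpleNamespace(
    kv_transfer_config=kv,
    parallel_config=SimpleNamespace(tensor_parallel_size=2,
                                    data_parallel_size=1),
)

try:
    check_kv_extra_config(vllm_config)
except ValueError as e:
    # "KV transfer 'prefill' config has a conflicting tensor parallel size.
    #  Expected 2, but got 4."
    print(e)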
@@ -216,13 +216,12 @@ class NPUModelRunner(GPUModelRunner):
         self.ascend_config = get_ascend_config()
         set_weight_prefetch_method(self.ascend_config.weight_prefetch_config)
         # Dump / PrecisionDebugger configuration now comes from AscendConfig
-        dump_cfg = self.ascend_config.dump_config
-        self.dump_enable = dump_cfg.enable_dump
+        dump_cfg = self.ascend_config.dump_config_path
         self.debugger = None
-        if self.dump_enable:
+        if dump_cfg is not None:
             if self.model_config.enforce_eager:
                 from msprobe.pytorch import PrecisionDebugger
-                self.debugger = PrecisionDebugger(dump_cfg.config_path)
+                self.debugger = PrecisionDebugger(dump_cfg)
             else:
                 raise RuntimeError(
                     "Dumping/debugging only works in eager mode.")
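
The hunks that follow replace the old `need_dump` flag with direct `self.debugger is not None` checks; since `self.debugger` is only set when a dump path is configured and eager mode is on, the single None-check carries the same information. A condensed sketch of the resulting lifecycle (not the full runner; `forward` stands in for the real model execution):

from typing import Optional

class RunnerSketch:

    def __init__(self, dump_config_path: Optional[str], enforce_eager: bool):
        self.debugger = None
        if dump_config_path is not None:
            if enforce_eager:
                from msprobe.pytorch import PrecisionDebugger
                self.debugger = PrecisionDebugger(dump_config_path)
            else:
                raise RuntimeError(
                    "Dumping/debugging only works in eager mode.")

    def execute_model(self, forward):
        output = forward()
        if self.debugger is not None:   # replaces need_dump
            self.debugger.stop()
            self.debugger.step()
        return output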
@@ -1388,9 +1387,7 @@ class NPUModelRunner(GPUModelRunner):
             self.eplb_updator.take_update_info_from_eplb_process()

-        # prevent debugger is None
-        need_dump = self.dump_enable and self.debugger is not None
-        if need_dump:
-            assert self.debugger is not None
+        if self.debugger is not None:
             dbg_cfg = getattr(self.debugger, "config", None)
             dump_level = str(
                 getattr(dbg_cfg, "level",
@@ -1407,7 +1404,7 @@ class NPUModelRunner(GPUModelRunner):
         aclgraph_runtime_mode, batch_descriptor = \
             self.cudagraph_dispatcher.dispatch(num_tokens=num_input_tokens, uniform_decode=uniform_decode, has_lora=has_lora)

-        if self.ascend_config.enable_async_exponential != 0:
+        if self.ascend_config.enable_async_exponential:
             self.sampler.do_async_exponential(
                 b_s=logits_indices.shape[0],
                 head_dim=self.model_config.get_vocab_size(),
@@ -1457,8 +1454,7 @@ class NPUModelRunner(GPUModelRunner):
         if not broadcast_pp_output:
             hidden_states.kv_connector_output = kv_connector_output
             self.kv_connector_output = kv_connector_output
-        if need_dump:
-            assert self.debugger is not None
+        if self.debugger is not None:
             self.debugger.stop()
             self.debugger.step()
         return hidden_states
@@ -1472,8 +1468,7 @@ class NPUModelRunner(GPUModelRunner):
                 hidden_states,
                 scheduler_output.total_num_scheduled_tokens,
                 num_scheduled_tokens_np)
-        if need_dump:
-            assert self.debugger is not None
+        if self.debugger is not None:
             self.debugger.stop()
             self.debugger.step()
         return pool_output
@@ -1529,7 +1524,6 @@ class NPUModelRunner(GPUModelRunner):
             output.kv_connector_output = kv_connector_output
             return output

-        need_dump = self.dump_enable and self.debugger is not None
         # Unpack ephemeral state.
         (
             scheduler_output,
@@ -1628,13 +1622,13 @@ class NPUModelRunner(GPUModelRunner):
         if self.dynamic_eplb:
             self.eplb_updator.forward_end()
         if not self.use_async_scheduling:
-            if need_dump:
-                assert self.debugger is not None
+            if self.debugger is not None:
                 self.debugger.stop()
                 self.debugger.step()
             return model_runner_output

-        if need_dump:
-            assert self.debugger is not None
+        if self.debugger is not None:
             self.debugger.stop()
             self.debugger.step()