cleanup ascend config (#5296)

1. Refresh the additional config doc.
2. Move the KV config check logic to the platform.
3. Improve the `dump_config` init logic and rename it to `dump_config_path`.
   This is a user-facing change: `dump_config` changes from a dict to a
   string (see the sketch after this list).
4. Correct the `enable_async_exponential` type.
5. Remove the unused `chunked_prefill_for_mla`.
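For item 3, a minimal before/after sketch of the user-facing change, passed
through vLLM Ascend's `additional_config` entry point. Only the rename and the
dict-to-string switch come from this commit; the old dict's keys and the model
name below are illustrative assumptions:

    from vllm import LLM

    # Before: `dump_config` took a dict (the keys here are hypothetical).
    llm = LLM(model="Qwen/Qwen2.5-7B-Instruct",
              additional_config={"dump_config": {"path": "/tmp/vllm_dump"}})

    # After: `dump_config_path` takes a plain string path.
    llm = LLM(model="Qwen/Qwen2.5-7B-Instruct",
              additional_config={"dump_config_path": "/tmp/vllm_dump"})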
- vLLM version: release/v0.13.0
- vLLM main: ad32e3e19c
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
@@ -1084,3 +1084,34 @@ def dispose_layer(layer: Any):
def replace_layer(original_layer: Any, new_layer: Any):
    original_layer.__class__ = new_layer.__class__
    original_layer.__dict__ = new_layer.__dict__


def check_kv_extra_config(vllm_config):

    def _check(name: str, config: dict):
        tp_key = "tp_size"
        dp_key = "dp_size"
        if tp_key in config:
            config_tp = config[tp_key]
            vllm_tp = vllm_config.parallel_config.tensor_parallel_size
            if config_tp != vllm_tp:
                raise ValueError(
                    f"KV transfer '{name}' config has a conflicting tensor parallel size. "
                    f"Expected {vllm_tp}, but got {config_tp}.")
        if dp_key in config:
            config_dp = config[dp_key]
            vllm_dp = vllm_config.parallel_config.data_parallel_size
            if config_dp != vllm_dp:
                raise ValueError(
                    f"KV transfer '{name}' config has a conflicting data parallel size. "
                    f"Expected {vllm_dp}, but got {config_dp}.")

    if vllm_config.kv_transfer_config.is_kv_producer:
        _check(
            "prefill",
            vllm_config.kv_transfer_config.get_from_extra_config(
                "prefill", {}))
    if vllm_config.kv_transfer_config.is_kv_consumer:
        _check(
            "decode",
            vllm_config.kv_transfer_config.get_from_extra_config("decode", {}))
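A minimal, self-contained sketch of what `check_kv_extra_config` enforces.
The `SimpleNamespace` stubs stand in for a real `VllmConfig` and its
`parallel_config`/`kv_transfer_config` attributes; they are assumptions for
illustration, not vLLM APIs:

    from types import SimpleNamespace

    # Extra config as a producer (prefill) node might receive it.
    extra = {"prefill": {"tp_size": 4, "dp_size": 1}}

    vllm_config = SimpleNamespace(
        parallel_config=SimpleNamespace(tensor_parallel_size=2,
                                        data_parallel_size=1),
        kv_transfer_config=SimpleNamespace(
            is_kv_producer=True,
            is_kv_consumer=False,
            get_from_extra_config=lambda key, default: extra.get(key, default)),
    )

    # "tp_size" in the "prefill" extra config (4) conflicts with the engine's
    # tensor_parallel_size (2), so this raises the ValueError from _check.
    check_kv_extra_config(vllm_config)

With matching sizes (e.g. "tp_size": 2 here), the check passes silently; it
only raises when the extra config disagrees with the engine's actual tensor
or data parallel size.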