cleanup ascend config (#5296)

1. refresh additional config doc
2. move kv config logic to platform.
3. improve `dump_config` init logic and rename it to `dump_config_path`
This change is user-facing: `dump_config` is changed from a dict to a
string.
4. correct the type of `enable_async_exponential` (int → bool)
5. remove the unused `chunked_prefill_for_mla` option

- vLLM version: release/v0.13.0
- vLLM main:
ad32e3e19c

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
wangxiyuan
2025-12-26 14:07:37 +08:00
committed by GitHub
parent adaa89a7a5
commit 29d2fe653d
11 changed files with 98 additions and 118 deletions

View File

@@ -73,10 +73,7 @@ async def test_models(model: str) -> None:
"HCCL_BUFFSIZE": "1024",
"PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True",
}
additional_config = {
"chunked_prefill_for_mla": True,
"enable_weight_nz_layout": True
}
additional_config = {"enable_weight_nz_layout": True}
speculative_config = {"num_speculative_tokens": 1, "method": "mtp"}
server_args = [
"--quantization", "ascend", "--data-parallel-size", "2",

View File

@@ -76,10 +76,7 @@ async def test_models(model: str, mode: str) -> None:
"PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True"
}
speculative_config = {"num_speculative_tokens": 1, "method": "mtp"}
additional_config = {
"chunked_prefill_for_mla": True,
"enable_weight_nz_layout": True
}
additional_config = {"enable_weight_nz_layout": True}
server_args = [
"--quantization", "ascend", "--data-parallel-size", "2",
"--tensor-parallel-size", "8", "--enable-expert-parallel", "--port",

View File

@@ -31,7 +31,7 @@ deployment:
--gpu-memory-utilization 0.9
--enforce-eager
--speculative-config '{"num_speculative_tokens": 1, "method":"mtp"}'
--additional-config '{"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'
--additional-config '{"enable_weight_nz_layout":true}'
-
server_cmd: >
@@ -53,5 +53,5 @@ deployment:
--gpu-memory-utilization 0.9
--enforce-eager
--speculative-config '{"num_speculative_tokens": 1, "method":"mtp"}'
--additional-config '{"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'
--additional-config '{"enable_weight_nz_layout":true}'
benchmarks:

View File

@@ -62,6 +62,6 @@ def test_qwen3_exponential_overlap() -> None:
max_model_len=8192,
gpu_memory_utilization=0.7,
additional_config={
"enable_async_exponential": 1,
"enable_async_exponential": True,
}) as runner:
runner.generate(example_prompts, sampling_params)