[Refactor] MLP weight prefetch for consistency with MoE model's prefetching in terms of code and usage (#6442)

### What this PR does / why we need it?
Refactor MLP weight prefetch for consistency with the MoE model's prefetching
in terms of code and usage.
The environment variables VLLM_ASCEND_ENABLE_PREFETCH_MLP,
VLLM_ASCEND_MLP_DOWN_PREFETCH_SIZE and
VLLM_ASCEND_MLP_GATE_UP_PREFETCH_SIZE are removed; the feature is now configured as follows:

--additional-config '{"weight_prefetch_config": { "enabled": true,
"prefetch_ratio": {"mlp": { "gate_up": 1.0, "down": 1.0} }}}'

### Does this PR introduce _any_ user-facing change?

Yes. The VLLM_ASCEND_ENABLE_PREFETCH_MLP, VLLM_ASCEND_MLP_DOWN_PREFETCH_SIZE and VLLM_ASCEND_MLP_GATE_UP_PREFETCH_SIZE environment variables no longer take effect; MLP weight prefetch is now enabled and tuned through the `weight_prefetch_config` entry of `--additional-config`.

### How was this patch tested?

Existing e2e serving tests were updated to enable weight prefetch via `--additional-config '{"weight_prefetch_config":{"enabled":true}}'` instead of the removed environment variable (see the test diffs below).

- vLLM version: v0.14.1
- vLLM main: dc917cceb8

---------

Signed-off-by: leo-pony <nengjunma@outlook.com>
Author: Nengjun Ma
Date: 2026-02-04 09:08:18 +08:00
Committed by: GitHub
Parent: fa56abea9f
Commit: 78fad4e348
18 changed files with 250 additions and 171 deletions


@@ -83,7 +83,6 @@ async def test_models(model: str, mode: str, tp_size: int) -> None:
 "TASK_QUEUE_ENABLE": "1",
 "HCCL_OP_EXPANSION_MODE": "AIV",
 "VLLM_ASCEND_ENABLE_FLASHCOMM": "1",
-"VLLM_ASCEND_ENABLE_PREFETCH_MLP": "1"
 }
 compilation_config = {
 "cudagraph_mode":
@@ -98,7 +97,8 @@ async def test_models(model: str, mode: str, tp_size: int) -> None:
 str(port), "--max-model-len", "40960", "--max-num-batched-tokens",
 "40960", "--block-size", "128", "--trust-remote-code",
 "--reasoning-parser", "qwen3", "--gpu-memory-utilization", "0.9",
-"--async-scheduling"
+"--async-scheduling", "--additional-config",
+'{"weight_prefetch_config":{"enabled":true}}',
 ]
 if mode == "single":
 server_args.append("--enforce-eager")


@@ -72,7 +72,6 @@ async def test_models(model: str, tp_size: int) -> None:
 "OMP_PROC_BIND": "false",
 "VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE": "1",
 "VLLM_ASCEND_ENABLE_FLASHCOMM": "1",
-"VLLM_ASCEND_ENABLE_PREFETCH_MLP": "1"
 }
 server_args = [
 "--quantization", "ascend", "--tensor-parallel-size",
@@ -82,7 +81,8 @@ async def test_models(model: str, tp_size: int) -> None:
 "0.9", "--block-size", "128", "--max-num-seqs", "256",
 "--enforce-eager", "--max-model-len", "35840",
 "--max-num-batched-tokens", "35840", "--additional-config",
-'{"enable_weight_nz_layout":true}', "--compilation-config",
+'{"enable_weight_nz_layout":true, "weight_prefetch_config":{"enabled": true}}',
+"--compilation-config",
 '{"cudagraph_mode":"FULL_DECODE_ONLY", "cudagraph_capture_sizes":[1,8,24,48,60]}'
 ]
 with RemoteOpenAIServer(model,


@@ -75,8 +75,7 @@ async def test_models(model: str, mode: str, tp_size: int) -> None:
 "OMP_PROC_BIND": "false",
 "HCCL_OP_EXPANSION_MODE": "AIV",
 "VLLM_ASCEND_ENABLE_FLASHCOMM": "1",
-"VLLM_ASCEND_ENABLE_DEBSE_OPTIMIZE": "1",
-"VLLM_ASCEND_ENABLE_PREFETCH_MLP": "1"
+"VLLM_ASCEND_ENABLE_DEBSE_OPTIMIZE": "1"
 }
 server_args = [
 "--tensor-parallel-size",
@@ -86,7 +85,7 @@ async def test_models(model: str, mode: str, tp_size: int) -> None:
 "--gpu-memory-utilization", "0.9", "--compilation_config",
 '{"cudagraph_mode":"FULL_DECODE_ONLY", "cudagraph_capture_sizes": [1, 8, 24, 48, 60]}',
 "--reasoning-parser", "deepseek_r1", "--distributed_executor_backend",
-"mp"
+"mp", "--additional-config", '{"weight_prefetch_config":{"enabled":true}}'
 ]
 if mode == "single":
 server_args.remove("--compilation_config")