remove useless patch (#4699)

patch_config is useless now. Let's remove it.


- vLLM version: v0.12.0
- vLLM main:
ad32e3e19c

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Co-authored-by: Mengqing Cao <cmq0113@163.com>
This commit is contained in:
wangxiyuan
2025-12-08 11:02:42 +08:00
committed by GitHub
parent 866347a621
commit 0b65ac6c4b
21 changed files with 30 additions and 277 deletions

View File

@@ -74,10 +74,7 @@ async def test_models(model: str, mode: str) -> None:
"VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS": "3600000"
}
additional_config: dict[str, Any] = {}
speculative_config = {
"num_speculative_tokens": 2,
"method": "deepseek_mtp"
}
speculative_config = {"num_speculative_tokens": 2, "method": "mtp"}
compilation_config = {
"cudagraph_capture_sizes": [56],
"cudagraph_mode": "FULL_DECODE_ONLY"

View File

@@ -84,10 +84,7 @@ async def test_models(model: str) -> None:
"chunked_prefill_for_mla": True,
"enable_weight_nz_layout": True
}
speculative_config = {
"num_speculative_tokens": 1,
"method": "deepseek_mtp"
}
speculative_config = {"num_speculative_tokens": 1, "method": "mtp"}
server_args = [
"--quantization", "ascend", "--data-parallel-size", "2",
"--tensor-parallel-size", "8", "--enable-expert-parallel", "--port",