Revert "drop ascend scheduler" (#4580)

Reverts vllm-project/vllm-ascend#4498
- vLLM version: v0.11.2
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.2

Author: Mengqing Cao
Date: 2025-11-29 22:20:48 +08:00
Committed by: GitHub
Parent: 4dbe4fd123
Commit: 517fd9272d

52 changed files with 2948 additions and 85 deletions
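
Every hunk below restores the same knob: an "ascend_scheduler_config" entry passed through vLLM's --additional-config flag, which takes a JSON blob on the server command line. A minimal sketch of that plumbing, assuming only the flag semantics visible in the diffs (the test harness around it is not reproduced):

    import json

    # The knob this revert re-adds: toggle the ascend scheduler per test
    # via vLLM's --additional-config JSON blob.
    additional_config = {
        "ascend_scheduler_config": {
            "enabled": False
        },
        "torchair_graph_config": {
            "enabled": True
        },
    }
    server_args = ["--additional-config", json.dumps(additional_config)]
    # json.dumps lower-cases Python booleans, matching the inline
    # '{"ascend_scheduler_config":{"enabled":false}}' strings used below.
    assert '"enabled": false' in server_args[1]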


@@ -33,6 +33,7 @@
 MODES = [
     "single",
     "aclgraph",
     "aclgraph_mlapo",
+    "no_chunkprefill",
 ]
 prompts = [
@@ -81,6 +82,9 @@ async def test_models(model: str, mode: str) -> None:
             "method": "deepseek_mtp"
         }
     additional_config = {
+        "ascend_scheduler_config": {
+            "enabled": False
+        },
         "torchair_graph_config": {
             "enabled": True,
             "enable_multistream_moe": False,
@@ -108,6 +112,10 @@ async def test_models(model: str, mode: str) -> None:
     if mode == "aclgraph_mlapo":
         env_dict["VLLM_ASCEND_ENABLE_MLAPO"] = "1"
         additional_config["torchair_graph_config"] = {"enabled": False}
+    if mode == "no_chunkprefill":
+        additional_config["ascend_scheduler_config"] = {"enabled": True}
+        i = server_args.index("--max-num-batched-tokens") + 1
+        server_args[i] = "36864"
     server_args.extend(["--additional-config", json.dumps(additional_config)])
     request_keyword_args: dict[str, Any] = {
         **api_keyword_args,
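
The new no_chunkprefill mode flips the scheduler on and then patches the already-built argument list in place: list.index locates the flag, so the slot right after it holds the value. A self-contained illustration of that step (the surrounding argument values here are placeholders, not the test's real defaults):

    server_args = ["--max-model-len", "40960", "--max-num-batched-tokens", "4096"]
    i = server_args.index("--max-num-batched-tokens") + 1  # value slot after the flag
    server_args[i] = "36864"
    assert server_args == ["--max-model-len", "40960",
                           "--max-num-batched-tokens", "36864"]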


@@ -71,6 +71,9 @@ async def test_models(model: str) -> None:
         "cudagraph_mode": "FULL_DECODE_ONLY"
     }
     additional_config: dict[str, Any] = {
+        "ascend_scheduler_config": {
+            "enabled": False
+        },
         "torchair_graph_config": {
             "enabled": True
         },


@@ -92,6 +92,7 @@ async def test_models(model: str, tp_size: int, dp_size: int,
         "--gpu-memory-utilization",
         "0.9",
         "--additional-config",
-        '{"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}',
+        '{"ascend_scheduler_config":{"enabled":true},'
+        '"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}',
     ]
     if full_graph:
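
The two quoted fragments added above are adjacent Python string literals, which the compiler joins into one JSON document before it reaches the CLI. A quick check that the concatenation parses as intended (values copied verbatim from the hunk):

    import json

    blob = ('{"ascend_scheduler_config":{"enabled":true},'
            '"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}')
    config = json.loads(blob)
    assert config["ascend_scheduler_config"]["enabled"] is True
    assert config["torchair_graph_config"]["graph_batch_sizes"] == [16]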


@@ -85,8 +85,9 @@ async def test_models(model: str, tp_size: int) -> None:
         str(tp_size), "--port",
         str(port), "--max-model-len", "30000", "--max-num-batched-tokens",
         "40000", "--max-num-seqs", "400", "--trust-remote-code",
-        "--gpu-memory-utilization", "0.8", "--compilation_config",
-        '{"cudagraph_mode": "FULL_DECODE_ONLY"}'
+        "--gpu-memory-utilization", "0.8", "--additional-config",
+        '{"ascend_scheduler_config":{"enabled":false}}',
+        "--compilation_config", '{"cudagraph_mode": "FULL_DECODE_ONLY"}'
     ]
     request_keyword_args: dict[str, Any] = {
         **api_keyword_args,


@@ -60,7 +60,11 @@ async def test_models(model: str) -> None:
         "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True",
         "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1"
     }
-    additional_config: dict[str, Any] = {}
+    additional_config: dict[str, Any] = {
+        "ascend_scheduler_config": {
+            "enabled": False
+        },
+    }
     compilation_config = {"cudagraph_mode": "FULL_DECODE_ONLY"}
     server_args = [
         "--quantization", "ascend", "--async-scheduling",


@@ -63,6 +63,11 @@ async def test_models(model: str, mode: str) -> None:
         "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True",
         "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1"
     }
+    additional_config: dict[str, Any] = {
+        "ascend_scheduler_config": {
+            "enabled": False
+        },
+    }
     compilation_config = {"cudagraph_mode": "FULL_DECODE_ONLY"}
     server_args = [
         "--quantization", "ascend", "--async-scheduling",
@@ -77,6 +82,7 @@ async def test_models(model: str, mode: str) -> None:
         server_args.extend(
             ["--compilation-config",
              json.dumps(compilation_config)])
+    server_args.extend(["--additional-config", json.dumps(additional_config)])
     request_keyword_args: dict[str, Any] = {
         **api_keyword_args,
     }


@@ -93,6 +93,8 @@ async def test_models(model: str, mode: str, tp_size: int) -> None:
         server_args.remove(
             '{"cudagraph_mode":"FULL_DECODE_ONLY", "cudagraph_capture_sizes": [1, 8, 24, 48, 60]}'
         )
+        server_args.append("--additional-config")
+        server_args.append('{"ascend_scheduler_config":{"enabled":true}}')
         server_args.append("--enforce-eager")
     request_keyword_args: dict[str, Any] = {
         **api_keyword_args,
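
One subtlety in this last hunk: list.remove deletes only the first element equal to its argument, i.e. just the compilation-config JSON string, after which the scheduler override and --enforce-eager are appended. A minimal sketch with placeholder arguments (not the test's full command line):

    args = ["--compilation-config", '{"cudagraph_mode":"FULL_DECODE_ONLY"}']
    args.remove('{"cudagraph_mode":"FULL_DECODE_ONLY"}')  # removes first match only
    args.append("--additional-config")
    args.append('{"ascend_scheduler_config":{"enabled":true}}')
    args.append("--enforce-eager")
    assert args[0] == "--compilation-config" and args[-1] == "--enforce-eager"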