Revert "drop ascend scheduler" (#4580)
Reverts vllm-project/vllm-ascend#4498

- vLLM version: v0.11.2
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.2
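For context, the setting this revert restores is the "ascend_scheduler_config" block that the e2e tests and deployment configs pass to the server through --additional-config. Below is a minimal, illustrative sketch of that pattern as it appears throughout the diff; the placeholder server_args values are not taken from any single test:

import json

# Illustrative only: mirrors how the affected tests toggle the Ascend scheduler.
additional_config = {
    "ascend_scheduler_config": {
        "enabled": False,  # the new "no_chunkprefill" test mode flips this to True
    },
}

server_args = ["--tensor-parallel-size", "4"]  # placeholder arguments
server_args.extend(["--additional-config", json.dumps(additional_config)])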
@@ -73,7 +73,11 @@ async def test_models(model: str, mode: str) -> None:
 "VLLM_RPC_TIMEOUT": "3600000",
 "VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS": "3600000"
 }
-additional_config: dict[str, Any] = {}
+additional_config: dict[str, Any] = {
+"ascend_scheduler_config": {
+"enabled": False
+},
+}
 speculative_config = {
 "num_speculative_tokens": 2,
 "method": "deepseek_mtp"
@@ -74,6 +74,9 @@ async def test_models(model: str) -> None:
 "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True",
 }
 additional_config = {
+"ascend_scheduler_config": {
+"enabled": False
+},
 "torchair_graph_config": {
 "enabled": True,
 "enable_multistream_moe": False,
@@ -68,7 +68,12 @@ aisbench_cases75 = [{
 async def test_models(model: str) -> None:
 port = get_open_port()
 env_dict = {"TASK_QUEUE_ENABLE": "1", "HCCL_OP_EXPANSION_MODE": "AIV"}
-additional_config = {"enable_weight_nz_layout": True}
+additional_config = {
+"ascend_scheduler_config": {
+"enabled": False
+},
+"enable_weight_nz_layout": True
+}
 server_args = [
 "--quantization", "ascend", "--reasoning-parser", "qwen3",
 "--tensor-parallel-size", "4", "--port",
@@ -83,7 +83,8 @@ async def test_models(model: str, tp_size: int) -> None:
 "0.9", "--block-size", "128", "--max-num-seqs", "256",
 "--enforce-eager", "--max-model-len", "35840",
 "--max-num-batched-tokens", "35840", "--additional-config",
-'{"enable_weight_nz_layout":true}', "--compilation-config",
+'{"ascend_scheduler_config":{"enabled":true},"enable_weight_nz_layout":true}',
+"--compilation-config",
 '{"cudagraph_mode":"FULL_DECODE_ONLY", "cudagraph_capture_sizes":[1,8,24,48,60]}'
 ]
 with RemoteOpenAIServer(model,
@@ -33,6 +33,7 @@ MODES = [
 "single",
 "aclgraph",
 "aclgraph_mlapo",
+"no_chunkprefill",
 ]

 prompts = [
@@ -81,6 +82,9 @@ async def test_models(model: str, mode: str) -> None:
 "method": "deepseek_mtp"
 }
 additional_config = {
+"ascend_scheduler_config": {
+"enabled": False
+},
 "torchair_graph_config": {
 "enabled": True,
 "enable_multistream_moe": False,
@@ -108,6 +112,10 @@ async def test_models(model: str, mode: str) -> None:
 if mode == "aclgraph_mlapo":
 env_dict["VLLM_ASCEND_ENABLE_MLAPO"] = "1"
 additional_config["torchair_graph_config"] = {"enabled": False}
+if mode == "no_chunkprefill":
+additional_config["ascend_scheduler_config"] = {"enabled": True}
+i = server_args.index("--max-num-batched-tokens") + 1
+server_args[i] = "36864"
 server_args.extend(["--additional-config", json.dumps(additional_config)])
 request_keyword_args: dict[str, Any] = {
 **api_keyword_args,
@@ -71,6 +71,9 @@ async def test_models(model: str) -> None:
 "cudagraph_mode": "FULL_DECODE_ONLY"
 }
 additional_config: dict[str, Any] = {
+"ascend_scheduler_config": {
+"enabled": False
+},
 "torchair_graph_config": {
 "enabled": True
 },
@@ -92,6 +92,7 @@ async def test_models(model: str, tp_size: int, dp_size: int,
 "--gpu-memory-utilization",
 "0.9",
 "--additional-config",
+'{"ascend_scheduler_config":{"enabled":true},'
 '"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}',
 ]
 if full_graph:
@@ -85,8 +85,9 @@ async def test_models(model: str, tp_size: int) -> None:
 str(tp_size), "--port",
 str(port), "--max-model-len", "30000", "--max-num-batched-tokens",
 "40000", "--max-num-seqs", "400", "--trust-remote-code",
-"--gpu-memory-utilization", "0.8", "--compilation_config",
-'{"cudagraph_mode": "FULL_DECODE_ONLY"}'
+"--gpu-memory-utilization", "0.8", "--additional-config",
+'{"ascend_scheduler_config":{"enabled":false}}',
+"--compilation_config", '{"cudagraph_mode": "FULL_DECODE_ONLY"}'
 ]
 request_keyword_args: dict[str, Any] = {
 **api_keyword_args,
@@ -60,7 +60,11 @@ async def test_models(model: str) -> None:
 "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True",
 "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1"
 }
-additional_config: dict[str, Any] = {}
+additional_config: dict[str, Any] = {
+"ascend_scheduler_config": {
+"enabled": False
+},
+}
 compilation_config = {"cudagraph_mode": "FULL_DECODE_ONLY"}
 server_args = [
 "--quantization", "ascend", "--async-scheduling",
@@ -63,6 +63,11 @@ async def test_models(model: str, mode: str) -> None:
 "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True",
 "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1"
 }
+additional_config: dict[str, Any] = {
+"ascend_scheduler_config": {
+"enabled": False
+},
+}
 compilation_config = {"cudagraph_mode": "FULL_DECODE_ONLY"}
 server_args = [
 "--quantization", "ascend", "--async-scheduling",
@@ -77,6 +82,7 @@ async def test_models(model: str, mode: str) -> None:
 server_args.extend(
 ["--compilation-config",
 json.dumps(compilation_config)])
+server_args.extend(["--additional-config", json.dumps(additional_config)])
 request_keyword_args: dict[str, Any] = {
 **api_keyword_args,
 }
@@ -93,6 +93,8 @@ async def test_models(model: str, mode: str, tp_size: int) -> None:
 server_args.remove(
 '{"cudagraph_mode":"FULL_DECODE_ONLY", "cudagraph_capture_sizes": [1, 8, 24, 48, 60]}'
 )
+server_args.append("--additional-config")
+server_args.append('{"ascend_scheduler_config":{"enabled":true}}')
 server_args.append("--enforce-eager")
 request_keyword_args: dict[str, Any] = {
 **api_keyword_args,
@@ -30,7 +30,7 @@ deployment:
 --quantization ascend
 --gpu-memory-utilization 0.9
 --speculative-config '{"num_speculative_tokens": 1, "method":"deepseek_mtp"}'
---additional-config '{"torchair_graph_config":{"enabled":true,"enable_multistream_moe":true},"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'
+--additional-config '{"ascend_scheduler_config":{"enabled":false},"torchair_graph_config":{"enabled":true,"enable_multistream_moe":true},"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'

 -
 server_cmd: >
@@ -51,7 +51,7 @@ deployment:
 --quantization ascend
 --gpu-memory-utilization 0.9
 --speculative-config '{"num_speculative_tokens": 1, "method":"deepseek_mtp"}'
---additional-config '{"torchair_graph_config":{"enabled":true,"enable_multistream_moe":true},"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'
+--additional-config '{"ascend_scheduler_config":{"enabled":false},"torchair_graph_config":{"enabled":true,"enable_multistream_moe":true},"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'
 benchmarks:
 acc:
 case_type: accuracy
@@ -31,7 +31,7 @@ deployment:
 --gpu-memory-utilization 0.9
 --enforce-eager
 --speculative-config '{"num_speculative_tokens": 1, "method":"deepseek_mtp"}'
---additional-config '{"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'
+--additional-config '{"ascend_scheduler_config":{"enabled":false},"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'

 -
 server_cmd: >
@@ -53,5 +53,5 @@ deployment:
 --gpu-memory-utilization 0.9
 --enforce-eager
 --speculative-config '{"num_speculative_tokens": 1, "method":"deepseek_mtp"}'
---additional-config '{"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'
+--additional-config '{"ascend_scheduler_config":{"enabled":false},"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'
 benchmarks:
@@ -50,7 +50,7 @@ deployment:
 "kv_connector_module_path": "vllm_ascend.distributed.llmdatadist_c_mgr_connector"
 }'
 --additional-config
-'{"torchair_graph_config":{"enabled":false,"enable_multistream_shared_expert":false},"enable_prefill_optimizations":true,"enable_weight_nz_layout":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
+'{"ascend_scheduler_config":{"enabled":false},"torchair_graph_config":{"enabled":false,"enable_multistream_shared_expert":false},"enable_prefill_optimizations":true,"enable_weight_nz_layout":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'

 -
 server_cmd: >
@@ -80,7 +80,7 @@ deployment:
 "kv_connector_module_path": "vllm_ascend.distributed.llmdatadist_c_mgr_connector"
 }'
 --additional-config
-'{"torchair_graph_config":{"enabled":false,"enable_multistream_shared_expert":false},"enable_prefill_optimizations":true,"enable_weight_nz_layout":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
+'{"ascend_scheduler_config":{"enabled":false},"torchair_graph_config":{"enabled":false,"enable_multistream_shared_expert":false},"enable_prefill_optimizations":true,"enable_weight_nz_layout":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
 -
 server_cmd: >
 vllm serve vllm-ascend/DeepSeek-R1-0528-W8A8
@@ -111,7 +111,7 @@ deployment:
 "kv_connector_module_path": "vllm_ascend.distributed.llmdatadist_c_mgr_connector"
 }'
 --additional-config
-'{"torchair_graph_config":{"enabled":true,"enable_multistream_mla":true,"graph_batch_sizes":[28],"use_cached_graph":true,"enable_super_kernel":false},"multistream_overlap_shared_expert":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
+'{"ascend_scheduler_config":{"enabled":false},"torchair_graph_config":{"enabled":true,"enable_multistream_mla":true,"graph_batch_sizes":[28],"use_cached_graph":true,"enable_super_kernel":false},"multistream_overlap_shared_expert":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
 -
 server_cmd: >
 vllm serve vllm-ascend/DeepSeek-R1-0528-W8A8
@@ -141,7 +141,7 @@ deployment:
 "kv_connector_module_path": "vllm_ascend.distributed.llmdatadist_c_mgr_connector"
 }'
 --additional-config
-'{"torchair_graph_config":{"enabled":true,"enable_multistream_mla":true,"graph_batch_sizes":[28],"use_cached_graph":true,"enable_super_kernel":false},"multistream_overlap_shared_expert":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
+'{"ascend_scheduler_config":{"enabled":false},"torchair_graph_config":{"enabled":true,"enable_multistream_mla":true,"graph_batch_sizes":[28],"use_cached_graph":true,"enable_super_kernel":false},"multistream_overlap_shared_expert":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
 benchmarks:
 perf:
 case_type: performance
@@ -49,7 +49,7 @@ deployment:
 "kv_connector_module_path": "vllm_ascend.distributed.llmdatadist_c_mgr_connector"
 }'
 --additional-config
-'{"torchair_graph_config":{"enabled":false,"enable_multistream_shared_expert":false},"enable_prefill_optimizations":true,"enable_weight_nz_layout":true}'
+'{"ascend_scheduler_config":{"enabled":false},"torchair_graph_config":{"enabled":false,"enable_multistream_shared_expert":false},"enable_prefill_optimizations":true,"enable_weight_nz_layout":true}'

 -
 server_cmd: >
@@ -79,7 +79,7 @@ deployment:
 "kv_connector_module_path": "vllm_ascend.distributed.llmdatadist_c_mgr_connector"
 }'
 --additional-config
-'{"torchair_graph_config":{"enabled":false,"enable_multistream_shared_expert":false},"enable_prefill_optimizations":true,"enable_weight_nz_layout":true}'
+'{"ascend_scheduler_config":{"enabled":false},"torchair_graph_config":{"enabled":false,"enable_multistream_shared_expert":false},"enable_prefill_optimizations":true,"enable_weight_nz_layout":true}'
 -
 server_cmd: >
 vllm serve vllm-ascend/DeepSeek-R1-0528-W8A8
@@ -110,7 +110,7 @@ deployment:
 "kv_connector_module_path": "vllm_ascend.distributed.llmdatadist_c_mgr_connector"
 }'
 --additional-config
-'{"torchair_graph_config":{"enabled":true,"enable_multistream_mla":true,"graph_batch_sizes":[28],"use_cached_graph":true,"enable_super_kernel":false},"multistream_overlap_shared_expert":true}'
+'{"ascend_scheduler_config":{"enabled":false},"torchair_graph_config":{"enabled":true,"enable_multistream_mla":true,"graph_batch_sizes":[28],"use_cached_graph":true,"enable_super_kernel":false},"multistream_overlap_shared_expert":true}'
 -
 server_cmd: >
 vllm serve vllm-ascend/DeepSeek-R1-0528-W8A8
@@ -140,7 +140,7 @@ deployment:
 "kv_connector_module_path": "vllm_ascend.distributed.llmdatadist_c_mgr_connector"
 }'
 --additional-config
-'{"torchair_graph_config":{"enabled":true,"enable_multistream_mla":true,"graph_batch_sizes":[28],"use_cached_graph":true,"enable_super_kernel":false},"multistream_overlap_shared_expert":true}'
+'{"ascend_scheduler_config":{"enabled":false},"torchair_graph_config":{"enabled":true,"enable_multistream_mla":true,"graph_batch_sizes":[28],"use_cached_graph":true,"enable_super_kernel":false},"multistream_overlap_shared_expert":true}'
 benchmarks:
 perf:
 case_type: performance
@@ -29,7 +29,7 @@ deployment:
 --trust-remote-code
 --no-enable-prefix-caching
 --gpu-memory-utilization 0.9
---additional-config '{"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
+--additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'

 -
 server_cmd: >
@@ -49,5 +49,5 @@ deployment:
 --trust-remote-code
 --no-enable-prefix-caching
 --gpu-memory-utilization 0.92
---additional-config '{"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
+--additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
 benchmarks: