Drop torchair (#4814)

aclgraph is now stable and fast, so let's drop the torchair graph mode.

TODO: some logic that was added to adapt to torchair should be cleaned up as
well. We'll do that in a follow-up PR.
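
For downstream configs, the migration amounts to deleting the torchair block
from --additional-config; graph capture now goes through the aclgraph path,
configured via --compilation-config. A minimal sketch of the before/after,
assuming a server_args list like the ones in the e2e tests below (the capture
size and cudagraph mode are illustrative values taken from those tests):

    import json

    # Before (removed in this commit): torchair graph mode was enabled
    # through the additional config.
    # additional_config = {"torchair_graph_config": {"enabled": True}}
    # server_args = ["--additional-config", json.dumps(additional_config)]

    # After: rely on aclgraph; capture behavior is set through the
    # compilation config instead.
    compilation_config = {
        "cudagraph_capture_sizes": [56],       # illustrative size from the tests
        "cudagraph_mode": "FULL_DECODE_ONLY",  # mode used by the e2e tests
    }
    server_args = ["--compilation-config", json.dumps(compilation_config)]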

- vLLM version: v0.12.0
- vLLM main: ad32e3e19c

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Co-authored-by: Mengqing Cao <cmq0113@163.com>
Authored by wangxiyuan on 2025-12-10 09:20:40 +08:00; committed by GitHub
parent ba9cda9dfd
commit 835b4c8f1d
84 changed files with 77 additions and 16881 deletions


@@ -73,7 +73,6 @@ async def test_models(model: str, mode: str) -> None:
"VLLM_RPC_TIMEOUT": "3600000",
"VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS": "3600000"
}
additional_config: dict[str, Any] = {}
speculative_config = {"num_speculative_tokens": 2, "method": "mtp"}
compilation_config = {
"cudagraph_capture_sizes": [56],
@@ -104,7 +103,6 @@ async def test_models(model: str, mode: str) -> None:
["--speculative-config",
json.dumps(speculative_config)])
server_args.extend(["--gpu-memory-utilization", "0.92"])
additional_config["torchair_graph_config"] = {"enabled": True}
aisbench_cases = aisbench_gsm8k
if mode == "mtp3":
env_dict["HCCL_OP_EXPANSION_MODE"] = "AIV"
@@ -117,9 +115,7 @@ async def test_models(model: str, mode: str) -> None:
 server_args.extend(
 ["--compilation-config",
 json.dumps(compilation_config)])
-additional_config["torchair_graph_config"] = {"enabled": False}
 aisbench_cases = aisbench_aime
-server_args.extend(["--additional-config", json.dumps(additional_config)])
 request_keyword_args: dict[str, Any] = {
 **api_keyword_args,
 }


@@ -74,13 +74,6 @@ async def test_models(model: str) -> None:
"PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True",
}
additional_config = {
"torchair_graph_config": {
"enabled": True,
"enable_multistream_moe": False,
"enable_multistream_mla": True,
"graph_batch_size": [16],
"use_cached_graph": True
},
"chunked_prefill_for_mla": True,
"enable_weight_nz_layout": True
}


@@ -29,7 +29,6 @@ MODELS = [
 ]
 MODES = [
-"torchair",
 "single",
 "aclgraph",
 "aclgraph_mlapo",
@@ -78,13 +77,6 @@ async def test_models(model: str, mode: str) -> None:
 }
 speculative_config = {"num_speculative_tokens": 1, "method": "mtp"}
 additional_config = {
-"torchair_graph_config": {
-"enabled": True,
-"enable_multistream_moe": False,
-"enable_multistream_mla": True,
-"graph_batch_sizes": [16],
-"use_cached_graph": True
-},
 "chunked_prefill_for_mla": True,
 "enable_weight_nz_layout": True
 }
@@ -99,12 +91,8 @@ async def test_models(model: str, mode: str) -> None:
 ]
 if mode == "single":
 server_args.append("--enforce-eager")
-additional_config["torchair_graph_config"] = {"enabled": False}
-if mode == "aclgraph":
-additional_config["torchair_graph_config"] = {"enabled": False}
 if mode == "aclgraph_mlapo":
 env_dict["VLLM_ASCEND_ENABLE_MLAPO"] = "1"
-additional_config["torchair_graph_config"] = {"enabled": False}
 server_args.extend(["--additional-config", json.dumps(additional_config)])
 request_keyword_args: dict[str, Any] = {
 **api_keyword_args,


@@ -68,9 +68,6 @@ async def test_models(model: str) -> None:
"cudagraph_mode": "FULL_DECODE_ONLY"
}
additional_config: dict[str, Any] = {
"torchair_graph_config": {
"enabled": True
},
"enable_shared_expert_dp": False,
"multistream_overlap_shared_expert": False,
"dynamic_eplb": True,


@@ -72,27 +72,13 @@ async def test_models(model: str, tp_size: int, dp_size: int,
 port = get_open_port()
 env_dict = {"HCCL_BUFFSIZE": "1024", "VLLM_ASCEND_ENABLE_MLAPO": "0"}
 server_args = [
-"--no-enable-prefix-caching",
-"--enable-expert-parallel",
+"--no-enable-prefix-caching", "--enable-expert-parallel",
 "--tensor-parallel-size",
-str(tp_size),
-"--data-parallel-size",
-str(dp_size),
-"--port",
-str(port),
-"--max-model-len",
-"16384",
-"--max-num-batched-tokens",
-"16384",
-"--block-size",
-"16",
-"--trust-remote-code",
-"--quantization",
-"ascend",
-"--gpu-memory-utilization",
-"0.9",
-"--additional-config",
-'{"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}',
+str(tp_size), "--data-parallel-size",
+str(dp_size), "--port",
+str(port), "--max-model-len", "16384", "--max-num-batched-tokens",
+"16384", "--block-size", "16", "--trust-remote-code", "--quantization",
+"ascend", "--gpu-memory-utilization", "0.9"
 ]
 if full_graph:
 server_args += [


@@ -1,64 +0,0 @@
-test_name: "test DeepSeek-R1-W8A8 torchair on A2"
-model: "vllm-ascend/DeepSeek-R1-0528-W8A8"
-num_nodes: 2
-npu_per_node: 8
-env_common:
-  VLLM_USE_MODELSCOPE: true
-  HCCL_BUFFSIZE: 1024
-  SERVER_PORT: 8080
-  OMP_PROC_BIND: false
-  OMP_NUM_THREADS: 10
-deployment:
-  -
-    server_cmd: >
-      vllm serve vllm-ascend/DeepSeek-R1-0528-W8A8
-      --host 0.0.0.0
-      --port $SERVER_PORT
-      --data-parallel-size 4
-      --data-parallel-size-local 2
-      --data-parallel-address $LOCAL_IP
-      --data-parallel-rpc-port 13399
-      --no-enable-prefix-caching
-      --max-num-seqs 16
-      --tensor-parallel-size 4
-      --max-model-len 36864
-      --max-num-batched-tokens 6000
-      --enable-expert-parallel
-      --trust-remote-code
-      --quantization ascend
-      --gpu-memory-utilization 0.9
-      --speculative-config '{"num_speculative_tokens": 1, "method":"mtp"}'
-      --additional-config '{"torchair_graph_config":{"enabled":true,"enable_multistream_moe":true},"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'
-  -
-    server_cmd: >
-      vllm serve vllm-ascend/DeepSeek-R1-0528-W8A8
-      --headless
-      --data-parallel-size 4
-      --data-parallel-rpc-port 13399
-      --data-parallel-size-local 2
-      --data-parallel-start-rank 2
-      --data-parallel-address $MASTER_IP
-      --no-enable-prefix-caching
-      --max-num-seqs 16
-      --tensor-parallel-size 4
-      --max-model-len 36864
-      --max-num-batched-tokens 6000
-      --enable-expert-parallel
-      --trust-remote-code
-      --quantization ascend
-      --gpu-memory-utilization 0.9
-      --speculative-config '{"num_speculative_tokens": 1, "method":"mtp"}'
-      --additional-config '{"torchair_graph_config":{"enabled":true,"enable_multistream_moe":true},"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'
-benchmarks:
-  acc:
-    case_type: accuracy
-    dataset_path: vllm-ascend/gsm8k
-    request_conf: vllm_api_general_chat
-    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
-    max_out_len: 32768
-    batch_size: 512
-    baseline: 95
-    threshold: 5


@@ -58,7 +58,7 @@ deployment:
 }
 }'
 --additional-config
-'{"torchair_graph_config":{"enabled":false,"enable_multistream_shared_expert":false},"enable_prefill_optimizations":true,"enable_weight_nz_layout":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
+'{"enable_prefill_optimizations":true,"enable_weight_nz_layout":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
 -
 server_cmd: >
@@ -96,7 +96,7 @@ deployment:
 }
 }'
 --additional-config
-'{"torchair_graph_config":{"enabled":false,"enable_multistream_shared_expert":false},"enable_prefill_optimizations":true,"enable_weight_nz_layout":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
+'{"enable_prefill_optimizations":true,"enable_weight_nz_layout":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
 -
 server_cmd: >
 vllm serve vllm-ascend/DeepSeek-R1-0528-W8A8
@@ -135,7 +135,7 @@ deployment:
 }
 }'
 --additional-config
-'{"torchair_graph_config":{"enabled":true,"enable_multistream_mla":true,"graph_batch_sizes":[28],"use_cached_graph":true,"enable_super_kernel":false},"multistream_overlap_shared_expert":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
+'{"multistream_overlap_shared_expert":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
 -
 server_cmd: >
 vllm serve vllm-ascend/DeepSeek-R1-0528-W8A8
@@ -173,7 +173,7 @@ deployment:
 }
 }'
 --additional-config
-'{"torchair_graph_config":{"enabled":true,"enable_multistream_mla":true,"graph_batch_sizes":[28],"use_cached_graph":true,"enable_super_kernel":false},"multistream_overlap_shared_expert":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
+'{"multistream_overlap_shared_expert":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
 benchmarks:
 perf:
 case_type: performance


@@ -57,7 +57,7 @@ deployment:
 }
 }'
 --additional-config
-'{"torchair_graph_config":{"enabled":false,"enable_multistream_shared_expert":false},"enable_prefill_optimizations":true,"enable_weight_nz_layout":true}'
+'{"enable_prefill_optimizations":true,"enable_weight_nz_layout":true}'
 -
 server_cmd: >
@@ -95,7 +95,7 @@ deployment:
 }
 }'
 --additional-config
-'{"torchair_graph_config":{"enabled":false,"enable_multistream_shared_expert":false},"enable_prefill_optimizations":true,"enable_weight_nz_layout":true}'
+'{"enable_prefill_optimizations":true,"enable_weight_nz_layout":true}'
 -
 server_cmd: >
 vllm serve vllm-ascend/DeepSeek-R1-0528-W8A8
@@ -134,7 +134,7 @@ deployment:
 }
 }'
 --additional-config
-'{"torchair_graph_config":{"enabled":true,"enable_multistream_mla":true,"graph_batch_sizes":[28],"use_cached_graph":true,"enable_super_kernel":false},"multistream_overlap_shared_expert":true}'
+'{"multistream_overlap_shared_expert":true}'
 -
 server_cmd: >
 vllm serve vllm-ascend/DeepSeek-R1-0528-W8A8
@@ -172,7 +172,7 @@ deployment:
 }
 }'
 --additional-config
-'{"torchair_graph_config":{"enabled":true,"enable_multistream_mla":true,"graph_batch_sizes":[28],"use_cached_graph":true,"enable_super_kernel":false},"multistream_overlap_shared_expert":true}'
+'{"multistream_overlap_shared_expert":true}'
 benchmarks:
 perf:
 case_type: performance


@@ -82,7 +82,6 @@ deployment:
 --trust-remote-code
 --no-enable-prefix-caching
 --gpu-memory-utilization 0.9
---additional-config '{"torchair_graph_config":{"enabled":true}}'
 --kv-transfer-config
 '{"kv_connector": "MooncakeConnector",
 "kv_role": "kv_consumer",


@@ -29,7 +29,6 @@ deployment:
 --trust-remote-code
 --no-enable-prefix-caching
 --gpu-memory-utilization 0.9
---additional-config '{"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
 -
 server_cmd: >
@@ -49,5 +48,4 @@ deployment:
 --trust-remote-code
 --no-enable-prefix-caching
 --gpu-memory-utilization 0.92
---additional-config '{"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
 benchmarks: