Drop torchair (#4814)

aclgraph is stable and fast now, so let's drop the torchair graph mode.

TODO: some logic that was added to adapt to torchair should be cleaned up as
well. We'll do that in a follow-up PR.
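For context, a minimal sketch of the user-facing change, based on the test
updates in this commit (the flag names and values are taken from the diffs
below and are illustrative, not prescriptive):

import json

# Before: torchair graph mode was opted into via --additional-config.
old_server_args = [
    "--additional-config",
    json.dumps({"torchair_graph_config": {"enabled": True}}),
]

# After: graph capture relies on aclgraph through the standard
# --compilation-config path, so no torchair-specific config is needed.
new_server_args = [
    "--compilation-config",
    json.dumps({"cudagraph_capture_sizes": [56]}),
]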

- vLLM version: v0.12.0
- vLLM main: ad32e3e19c

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Co-authored-by: Mengqing Cao <cmq0113@163.com>
Author: wangxiyuan
Date: 2025-12-10 09:20:40 +08:00
Committed by: GitHub
Parent: ba9cda9dfd
Commit: 835b4c8f1d
84 changed files with 77 additions and 16881 deletions


@@ -73,7 +73,6 @@ async def test_models(model: str, mode: str) -> None:
"VLLM_RPC_TIMEOUT": "3600000",
"VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS": "3600000"
}
-additional_config: dict[str, Any] = {}
speculative_config = {"num_speculative_tokens": 2, "method": "mtp"}
compilation_config = {
"cudagraph_capture_sizes": [56],
@@ -104,7 +103,6 @@ async def test_models(model: str, mode: str) -> None:
["--speculative-config",
json.dumps(speculative_config)])
server_args.extend(["--gpu-memory-utilization", "0.92"])
additional_config["torchair_graph_config"] = {"enabled": True}
aisbench_cases = aisbench_gsm8k
if mode == "mtp3":
env_dict["HCCL_OP_EXPANSION_MODE"] = "AIV"
@@ -117,9 +115,7 @@ async def test_models(model: str, mode: str) -> None:
server_args.extend(
["--compilation-config",
json.dumps(compilation_config)])
additional_config["torchair_graph_config"] = {"enabled": False}
aisbench_cases = aisbench_aime
server_args.extend(["--additional-config", json.dumps(additional_config)])
request_keyword_args: dict[str, Any] = {
**api_keyword_args,
}
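For reference, after these removals this branch of the test presumably passes
only the compilation config, with no --additional-config built at all:

server_args.extend(
    ["--compilation-config",
     json.dumps(compilation_config)])
aisbench_cases = aisbench_aime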


@@ -74,13 +74,6 @@ async def test_models(model: str) -> None:
"PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True",
}
additional_config = {
"torchair_graph_config": {
"enabled": True,
"enable_multistream_moe": False,
"enable_multistream_mla": True,
"graph_batch_size": [16],
"use_cached_graph": True
},
"chunked_prefill_for_mla": True,
"enable_weight_nz_layout": True
}
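With the torchair block gone, the hunk header (13 lines shrinking to 6)
implies this additional_config now reduces to just the two remaining entries:

additional_config = {
    "chunked_prefill_for_mla": True,
    "enable_weight_nz_layout": True
}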