Drop torchair (#4814)
aclgraph is stable and fast now, so let's drop the torchair graph mode.
TODO: some of the logic added to adapt to torchair should be cleaned up as
well; we'll do that in a follow-up PR.
- vLLM version: v0.12.0
- vLLM main: ad32e3e19c
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Co-authored-by: Mengqing Cao <cmq0113@163.com>
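For context, a rough before/after sketch of the graph-mode configuration this change drops. The dict keys are copied from the test hunks below; treating cudagraph_mode as a compilation-config knob for aclgraph capture is an assumption for illustration, not part of this patch.

# Before: torchair graph mode had to be opted into per test/deployment via
# --additional-config (the config removed in the hunks below).
torchair_additional_config = {
    "torchair_graph_config": {
        "enabled": True,
        "graph_batch_sizes": [16],
        "use_cached_graph": True,
    },
}

# After: aclgraph is the default graph mode, so no torchair key is needed.
# Tests either keep the default capture, pass --enforce-eager for eager mode,
# or tune the capture mode (assumed compilation-config placement), e.g.:
compilation_config = {"cudagraph_mode": "FULL_DECODE_ONLY"}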
@@ -29,7 +29,6 @@ MODELS = [
 ]
 
 MODES = [
-    "torchair",
     "single",
     "aclgraph",
     "aclgraph_mlapo",
@@ -78,13 +77,6 @@ async def test_models(model: str, mode: str) -> None:
     }
     speculative_config = {"num_speculative_tokens": 1, "method": "mtp"}
     additional_config = {
-        "torchair_graph_config": {
-            "enabled": True,
-            "enable_multistream_moe": False,
-            "enable_multistream_mla": True,
-            "graph_batch_sizes": [16],
-            "use_cached_graph": True
-        },
         "chunked_prefill_for_mla": True,
         "enable_weight_nz_layout": True
     }
@@ -99,12 +91,8 @@ async def test_models(model: str, mode: str) -> None:
     ]
     if mode == "single":
         server_args.append("--enforce-eager")
-        additional_config["torchair_graph_config"] = {"enabled": False}
-    if mode == "aclgraph":
-        additional_config["torchair_graph_config"] = {"enabled": False}
     if mode == "aclgraph_mlapo":
         env_dict["VLLM_ASCEND_ENABLE_MLAPO"] = "1"
-        additional_config["torchair_graph_config"] = {"enabled": False}
     server_args.extend(["--additional-config", json.dumps(additional_config)])
     request_keyword_args: dict[str, Any] = {
         **api_keyword_args,
@@ -68,9 +68,6 @@ async def test_models(model: str) -> None:
         "cudagraph_mode": "FULL_DECODE_ONLY"
     }
     additional_config: dict[str, Any] = {
-        "torchair_graph_config": {
-            "enabled": True
-        },
         "enable_shared_expert_dp": False,
         "multistream_overlap_shared_expert": False,
         "dynamic_eplb": True,
@@ -72,27 +72,13 @@ async def test_models(model: str, tp_size: int, dp_size: int,
     port = get_open_port()
     env_dict = {"HCCL_BUFFSIZE": "1024", "VLLM_ASCEND_ENABLE_MLAPO": "0"}
     server_args = [
-        "--no-enable-prefix-caching",
-        "--enable-expert-parallel",
+        "--no-enable-prefix-caching", "--enable-expert-parallel",
         "--tensor-parallel-size",
-        str(tp_size),
-        "--data-parallel-size",
-        str(dp_size),
-        "--port",
-        str(port),
-        "--max-model-len",
-        "16384",
-        "--max-num-batched-tokens",
-        "16384",
-        "--block-size",
-        "16",
-        "--trust-remote-code",
-        "--quantization",
-        "ascend",
-        "--gpu-memory-utilization",
-        "0.9",
-        "--additional-config",
-        '{"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}',
+        str(tp_size), "--data-parallel-size",
+        str(dp_size), "--port",
+        str(port), "--max-model-len", "16384", "--max-num-batched-tokens",
+        "16384", "--block-size", "16", "--trust-remote-code", "--quantization",
+        "ascend", "--gpu-memory-utilization", "0.9"
     ]
     if full_graph:
         server_args += [
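Migration note (a sketch, not part of the patch): launch scripts that passed the torchair config through --additional-config can simply drop that key; the remaining keys shown in the hunks above, such as enable_weight_nz_layout and chunked_prefill_for_mla, stay valid. A hypothetical helper for cleaning up an existing JSON string:

import json

def strip_torchair(additional_config_json: str) -> str:
    # Remove the now-unsupported torchair_graph_config key and keep the rest.
    # This helper is illustrative only; it is not part of vllm-ascend.
    cfg = json.loads(additional_config_json)
    cfg.pop("torchair_graph_config", None)
    return json.dumps(cfg)

print(strip_torchair(
    '{"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]},'
    '"enable_weight_nz_layout":true}'))
# -> {"enable_weight_nz_layout": true}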