diff --git a/.github/workflows/vllm_ascend_test_nightly_a3.yaml b/.github/workflows/vllm_ascend_test_nightly_a3.yaml
index 72eda3d8..a77e1d11 100644
--- a/.github/workflows/vllm_ascend_test_nightly_a3.yaml
+++ b/.github/workflows/vllm_ascend_test_nightly_a3.yaml
@@ -60,6 +60,9 @@ jobs:
           - name: deepseek-r1-w8a8-eplb
             os: linux-aarch64-a3-16
             tests: tests/e2e/nightly/models/test_deepseek_r1_w8a8_eplb.py
+          - name: deepseek-r1-w8a8-mtpx
+            os: linux-aarch64-a3-16
+            tests: tests/e2e/nightly/features/test_mtpx_deepseek_r1_0528_w8a8.py
           - name: qwen2-5-vl-7b
             os: linux-aarch64-a3-4
             tests: tests/e2e/nightly/models/test_qwen2_5_vl_7b.py
diff --git a/tests/e2e/nightly/features/test_mtpx_deepseek_r1_0528_w8a8.py b/tests/e2e/nightly/features/test_mtpx_deepseek_r1_0528_w8a8.py
new file mode 100644
index 00000000..8677c074
--- /dev/null
+++ b/tests/e2e/nightly/features/test_mtpx_deepseek_r1_0528_w8a8.py
@@ -0,0 +1,138 @@
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
+# Copyright 2023 The vLLM team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# This file is a part of the vllm-ascend project.
+#
+import json
+from typing import Any
+
+import openai
+import pytest
+from vllm.utils import get_open_port
+
+from tests.e2e.conftest import RemoteOpenAIServer
+from tools.aisbench import run_aisbench_cases
+
+MODELS = [
+    "vllm-ascend/DeepSeek-R1-0528-W8A8",
+]
+
+MODES = ["mtp2", "mtp3"]
+
+prompts = [
+    "San Francisco is a",
+]
+
+api_keyword_args = {
+    "max_tokens": 10,
+}
+
+aisbench_cases = [{
+    "case_type": "accuracy",
+    "dataset_path": "vllm-ascend/aime2024",
+    "request_conf": "vllm_api_general_chat",
+    "dataset_conf": "aime2024/aime2024_gen_0_shot_chat_prompt",
+    "max_out_len": 32768,
+    "batch_size": 32,
+    "baseline": 80,
+    "threshold": 7
+}]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("model", MODELS)
+@pytest.mark.parametrize("mode", MODES)
+async def test_models(model: str, mode: str) -> None:
+    port = get_open_port()
+    env_dict = {
+        "OMP_NUM_THREADS": "100",
+        "OMP_PROC_BIND": "false",
+        "HCCL_BUFFSIZE": "1024",
+        "VLLM_RPC_TIMEOUT": "3600000",
+        "VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS": "3600000"
+    }
+    additional_config: dict[str, Any] = {
+        "ascend_scheduler_config": {
+            "enabled": False
+        },
+    }
+    speculative_config = {
+        "num_speculative_tokens": 2,
+        "method": "deepseek_mtp"
+    }
+    compilation_config = {
+        "cudagraph_capture_sizes": [56],
+        "cudagraph_mode": "FULL_DECODE_ONLY"
+    }
+    server_args = [
+        "--quantization",
+        "ascend",
+        "--seed",
+        "1024",
+        "--no-enable-prefix-caching",
+        "--data-parallel-size",
+        "2",
+        "--tensor-parallel-size",
+        "8",
+        "--enable-expert-parallel",
+        "--port",
+        str(port),
+        "--max-model-len",
+        "40960",
+        "--max-num-seqs",
+        "14",
+        "--trust-remote-code",
+    ]
+    if mode == "mtp2":
+        server_args.extend(["--max-num-batched-tokens", "4096"])
+        server_args.extend(
+            ["--speculative-config",
+             json.dumps(speculative_config)])
+        server_args.extend(["--gpu-memory-utilization", "0.92"])
+        additional_config["torchair_graph_config"] = {"enabled": True}
+    if mode == "mtp3":
+        env_dict["HCCL_OP_EXPANSION_MODE"] = "AIV"
+        server_args.extend(["--max-num-batched-tokens", "2048"])
+        speculative_config["num_speculative_tokens"] = 3
+        server_args.extend(
+            ["--speculative-config",
+             json.dumps(speculative_config)])
+        server_args.extend(["--gpu-memory-utilization", "0.9"])
+        server_args.extend(
+            ["--compilation-config",
+             json.dumps(compilation_config)])
+        additional_config["torchair_graph_config"] = {"enabled": False}
+    server_args.extend(["--additional-config", json.dumps(additional_config)])
+    request_keyword_args: dict[str, Any] = {
+        **api_keyword_args,
+    }
+    with RemoteOpenAIServer(model,
+                            server_args,
+                            server_port=port,
+                            env_dict=env_dict,
+                            auto_port=False) as server:
+        client = server.get_async_client()
+        batch = await client.completions.create(
+            model=model,
+            prompt=prompts,
+            **request_keyword_args,
+        )
+        choices: list[openai.types.CompletionChoice] = batch.choices
+        assert choices[0].text, "empty response"
+        print(choices)
+        # aisbench test
+        run_aisbench_cases(model,
+                           port,
+                           aisbench_cases,
+                           server_args=server_args)
diff --git a/tests/e2e/nightly/features/test_qwen3_32b_int8_a3_feature_stack3.py b/tests/e2e/nightly/features/test_qwen3_32b_int8_a3_feature_stack3.py
index 7ff88b57..17a7f4b6 100644
--- a/tests/e2e/nightly/features/test_qwen3_32b_int8_a3_feature_stack3.py
+++ b/tests/e2e/nightly/features/test_qwen3_32b_int8_a3_feature_stack3.py
@@ -14,14 +14,13 @@
 # limitations under the License.
 # This file is a part of the vllm-ascend project.
 #
-from typing import Any
 
-import openai
 import pytest
 from vllm.utils import get_open_port
 
 from tests.e2e.conftest import RemoteOpenAIServer
 from tools.aisbench import run_aisbench_cases
+from tools.send_request import send_text_request
 
 MODELS = [
     "vllm-ascend/Qwen3-32B-W8A8",
@@ -30,11 +29,13 @@ MODELS = [
 TENSOR_PARALLELS = [4]
 
 prompts = [
-    "San Francisco is a",
+    "9.11 and 9.8, which is greater?",
 ]
 
 api_keyword_args = {
-    "max_tokens": 10,
+    "chat_template_kwargs": {
+        "enable_thinking": True
+    },
 }
 
 aisbench_cases = [{
@@ -86,21 +87,14 @@ async def test_models(model: str, tp_size: int) -> None:
         "--compilation-config",
         '{"cudagraph_mode":"FULL_DECODE_ONLY", "cudagraph_capture_sizes":[1,8,24,48,60]}'
     ]
-    request_keyword_args: dict[str, Any] = {
-        **api_keyword_args,
-    }
     with RemoteOpenAIServer(model,
                             server_args,
                             server_port=port,
                             env_dict=env_dict,
                             auto_port=False) as server:
-        client = server.get_async_client()
-        batch = await client.completions.create(
-            model=model,
-            prompt=prompts,
-            **request_keyword_args,
-        )
-        choices: list[openai.types.CompletionChoice] = batch.choices
-        assert choices[0].text, "empty response"
+        send_text_request(prompts[0],
+                          model,
+                          server,
+                          request_args=api_keyword_args)
         # aisbench test
         run_aisbench_cases(model, port, aisbench_cases)
diff --git a/tests/e2e/nightly/models/test_deepseek_r1_w8a8_eplb.py b/tests/e2e/nightly/models/test_deepseek_r1_w8a8_eplb.py
index 89449ac4..bca2baf0 100644
--- a/tests/e2e/nightly/models/test_deepseek_r1_w8a8_eplb.py
+++ b/tests/e2e/nightly/models/test_deepseek_r1_w8a8_eplb.py
@@ -28,8 +28,6 @@ MODELS = [
     "vllm-ascend/DeepSeek-R1-W8A8",
 ]
 
-MODES = ["eplb"]
-
 prompts = [
     "San Francisco is a",
 ]
@@ -38,51 +36,69 @@ api_keyword_args = {
     "max_tokens": 10,
 }
 
-aisbench_gsm8k = [{
+aisbench_cases = [{
     "case_type": "accuracy",
     "dataset_path": "vllm-ascend/gsm8k-lite",
     "request_conf": "vllm_api_general_chat",
     "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
     "max_out_len": 32768,
     "batch_size": 32,
-    "top_k": 20,
"baseline": 95, "threshold": 5 }] -mode_aisbench = {"eplb": aisbench_gsm8k} - @pytest.mark.asyncio @pytest.mark.parametrize("model", MODELS) -@pytest.mark.parametrize("mode", MODES) -async def test_models(model: str, mode: str) -> None: +async def test_models(model: str) -> None: port = get_open_port() env_dict = { - "OMP_NUM_THREADS": "10", + "OMP_NUM_THREADS": "100", "OMP_PROC_BIND": "false", - "HCCL_BUFFSIZE": "1024", - "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True", - "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1" + "HCCL_BUFFSIZE": "200", + "VLLM_ASCEND_ENABLE_MLAPO": "1", + "VLLM_RPC_TIMEOUT": "3600000", + "VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS": "3600000", + "DISABLE_L2_CACHE": "1", + "DYNAMIC_EPLB": "true", + } + speculative_config = { + "num_speculative_tokens": 1, + "method": "deepseek_mtp" + } + compilation_config = { + "cudagraph_capture_sizes": [24], + "cudagraph_mode": "FULL_DECODE_ONLY" } additional_config: dict[str, Any] = { "ascend_scheduler_config": { "enabled": False }, + "torchair_graph_config": { + "enabled": True + }, + "enable_shared_expert_dp": False, + "multistream_overlap_shared_expert": False, + "dynamic_eplb": True, + "num_iterations_eplb_update": 14000, + "num_wait_worker_iterations": 30, + "init_redundancy_expert": 0, + "gate_eplb": False } server_args = [ - "--quantization", "ascend", "--async-scheduling", - "--data-parallel-size", "4", "--tensor-parallel-size", "4", - "--enable-expert-parallel", "--port", - str(port), "--max-model-len", "40960", "--max-num-batched-tokens", - "8192", "--max-num-seqs", "12", "--trust-remote-code", - "--gpu-memory-utilization", "0.9" + "--quantization", "ascend", "--seed", "1024", + "--no-enable-prefix-caching", "--data-parallel-size", "4", + "--tensor-parallel-size", "4", "--enable-expert-parallel", "--port", + str(port), "--max-model-len", "40000", "--max-num-batched-tokens", + "4096", "--max-num-seqs", "12", "--trust-remote-code", + "--gpu-memory-utilization", "0.92" ] - if mode == "eplb": - env_dict["DYNAMIC_EPLB"] = "true" - additional_config["dynamic_eplb"] = True - additional_config["num_iterations_eplb_update"] = 2048 - additional_config["num_wait_worker_iterations"] = 200 + server_args.extend( + ["--speculative-config", + json.dumps(speculative_config)]) + server_args.extend( + ["--compilation-config", + json.dumps(compilation_config)]) server_args.extend(["--additional-config", json.dumps(additional_config)]) request_keyword_args: dict[str, Any] = { **api_keyword_args, @@ -102,7 +118,6 @@ async def test_models(model: str, mode: str) -> None: assert choices[0].text, "empty response" print(choices) # aisbench test - aisbench_cases = mode_aisbench[mode] run_aisbench_cases(model, port, aisbench_cases, diff --git a/tests/e2e/nightly/models/test_qwen3_235b_a22b_w8a8_eplb.py b/tests/e2e/nightly/models/test_qwen3_235b_a22b_w8a8_eplb.py index 8debeecb..945d7cae 100644 --- a/tests/e2e/nightly/models/test_qwen3_235b_a22b_w8a8_eplb.py +++ b/tests/e2e/nightly/models/test_qwen3_235b_a22b_w8a8_eplb.py @@ -28,8 +28,6 @@ MODELS = [ "vllm-ascend/Qwen3-235B-A22B-W8A8", ] -MODES = ["eplb"] - prompts = [ "San Francisco is a", ] @@ -38,7 +36,7 @@ api_keyword_args = { "max_tokens": 10, } -aisbench_gsm8k = [{ +aisbench_cases = [{ "case_type": "accuracy", "dataset_path": "vllm-ascend/gsm8k-lite", "request_conf": "vllm_api_general_chat", @@ -47,17 +45,13 @@ aisbench_gsm8k = [{ "batch_size": 32, "top_k": 20, "baseline": 95, - "threshold": 5, - "topk": 20 + "threshold": 5 }] -mode_aisbench = {"eplb": aisbench_gsm8k} - @pytest.mark.asyncio 
 @pytest.mark.parametrize("model", MODELS)
-@pytest.mark.parametrize("mode", MODES)
-async def test_models(model: str, mode: str) -> None:
+async def test_models(model: str) -> None:
     port = get_open_port()
     env_dict = {
         "OMP_NUM_THREADS": "10",
@@ -71,6 +65,7 @@ async def test_models(model: str, mode: str) -> None:
             "enabled": False
         },
     }
+    compilation_config = {"cudagraph_mode": "FULL_DECODE_ONLY"}
     server_args = [
         "--quantization", "ascend", "--async-scheduling",
         "--data-parallel-size", "4", "--tensor-parallel-size", "4",
@@ -79,11 +74,16 @@ async def test_models(model: str, mode: str) -> None:
         "8192", "--max-num-seqs", "12", "--trust-remote-code",
         "--gpu-memory-utilization", "0.9"
     ]
-    if mode == "eplb":
-        env_dict["DYNAMIC_EPLB"] = "true"
-        additional_config["dynamic_eplb"] = True
-        additional_config["num_iterations_eplb_update"] = 2048
-        additional_config["num_wait_worker_iterations"] = 200
+    env_dict["EXPERT_MAP_RECORD"] = "true"
+    env_dict["DYNAMIC_EPLB"] = "true"
+    additional_config["dynamic_eplb"] = True
+    additional_config["num_iterations_eplb_update"] = 14000
+    additional_config["num_wait_worker_iterations"] = 30
+    additional_config["init_redundancy_expert"] = 0
+    additional_config["gate_eplb"] = False
+    server_args.extend(
+        ["--compilation-config",
+         json.dumps(compilation_config)])
     server_args.extend(["--additional-config", json.dumps(additional_config)])
     request_keyword_args: dict[str, Any] = {
         **api_keyword_args,
@@ -103,7 +103,6 @@ async def test_models(model: str, mode: str) -> None:
         assert choices[0].text, "empty response"
         print(choices)
         # aisbench test
-        aisbench_cases = mode_aisbench[mode]
         run_aisbench_cases(model,
                            port,
                            aisbench_cases,
diff --git a/tests/e2e/nightly/models/test_qwen3_32b_int8.py b/tests/e2e/nightly/models/test_qwen3_32b_int8.py
index bbaf863a..0b047cc3 100644
--- a/tests/e2e/nightly/models/test_qwen3_32b_int8.py
+++ b/tests/e2e/nightly/models/test_qwen3_32b_int8.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 # This file is a part of the vllm-ascend project.
 #
+import json
 import os
 from typing import Any
 
@@ -44,8 +45,8 @@ api_keyword_args = {
 }
 
 batch_size_dict = {
-    "linux-aarch64-a2-4": 44,
-    "linux-aarch64-a3-4": 46,
+    "linux-aarch64-a2-4": 72,
+    "linux-aarch64-a3-4": 76,
 }
 VLLM_CI_RUNNER = os.getenv("VLLM_CI_RUNNER", "linux-aarch64-a2-4")
 performance_batch_size = batch_size_dict.get(VLLM_CI_RUNNER, 1)
@@ -80,21 +81,32 @@ async def test_models(model: str, mode: str, tp_size: int) -> None:
     port = get_open_port()
     env_dict = {
         "TASK_QUEUE_ENABLE": "1",
-        "OMP_PROC_BIND": "false",
+        "VLLM_ASCEND_ENABLE_DENSE_OPTIMIZE": "1",
         "HCCL_OP_EXPANSION_MODE": "AIV",
-        "PAGED_ATTENTION_MASK_LEN": "5500"
+        "VLLM_ASCEND_ENABLE_FLASHCOMM": "1",
+        "VLLM_ASCEND_ENABLE_PREFETCH_MLP": "1"
+    }
+    compilation_config = {
+        "cudagraph_mode":
+        "FULL_DECODE_ONLY",
+        "cudagraph_capture_sizes":
+        [1, 12, 16, 20, 24, 32, 48, 60, 64, 68, 72, 76, 80]
     }
     server_args = [
         "--quantization", "ascend", "--no-enable-prefix-caching",
         "--tensor-parallel-size",
         str(tp_size), "--port",
-        str(port), "--max-model-len", "36864", "--max-num-batched-tokens",
-        "36864", "--block-size", "128", "--trust-remote-code",
-        "--gpu-memory-utilization", "0.9", "--additional-config",
-        '{"enable_weight_nz_layout":true}'
+        str(port), "--max-model-len", "40960", "--max-num-batched-tokens",
+        "40960", "--block-size", "128", "--trust-remote-code",
+        "--reasoning-parser", "qwen3", "--gpu-memory-utilization", "0.9",
+        "--async-scheduling"
     ]
     if mode == "single":
         server_args.append("--enforce-eager")
+    if mode == "aclgraph":
+        server_args.extend(
+            ["--compilation-config",
+             json.dumps(compilation_config)])
     request_keyword_args: dict[str, Any] = {
         **api_keyword_args,
     }
diff --git a/tests/e2e/nightly/models/test_qwq_32b.py b/tests/e2e/nightly/models/test_qwq_32b.py
index ad3dd6b2..a60eff22 100644
--- a/tests/e2e/nightly/models/test_qwq_32b.py
+++ b/tests/e2e/nightly/models/test_qwq_32b.py
@@ -56,9 +56,9 @@ aisbench_cases = [{
     "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
     "request_conf": "vllm_api_stream_chat",
     "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
-    "num_prompts": 176,
+    "num_prompts": 240,
     "max_out_len": 1500,
-    "batch_size": 44,
+    "batch_size": 60,
     "baseline": 1,
     "threshold": 0.97
 }]
@@ -75,9 +75,8 @@ async def test_models(model: str, mode: str, tp_size: int) -> None:
         "OMP_PROC_BIND": "false",
         "HCCL_OP_EXPANSION_MODE": "AIV",
         "VLLM_ASCEND_ENABLE_FLASHCOMM": "1",
-        "VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE": "1",
         "VLLM_ASCEND_ENABLE_DEBSE_OPTIMIZE": "1",
-        "VLLM_ASCEND_ENABLE_PREFETCH": "1"
+        "VLLM_ASCEND_ENABLE_PREFETCH_MLP": "1"
     }
     server_args = [
         "--tensor-parallel-size",
diff --git a/tools/send_request.py b/tools/send_request.py
new file mode 100644
index 00000000..f0bb69a7
--- /dev/null
+++ b/tools/send_request.py
@@ -0,0 +1,23 @@
+from typing import Any
+
+import requests
+
+data: dict[str, Any] = {
+    "messages": [{
+        "role": "user",
+        "content": "",
+    }],
+}
+
+
+def send_text_request(prompt, model, server, request_args=None):
+    data["messages"][0]["content"] = prompt
+    data["model"] = model
+    url = server.url_for("v1", "chat", "completions")
+    if request_args:
+        data.update(request_args)
+    response = requests.post(url, json=data)
+    print("Status Code:", response.status_code)
+    response_json = response.json()
+    print("Response:", response_json)
+    assert response_json["choices"][0]["message"]["content"], "empty response"