[TEST] Update nightly cases and add mtpx (#4111)
### What this PR does / why we need it?
This PR updates several nightly test cases and adds mtpx cases (DeepSeek MTP with 2 and 3 speculative tokens), which we want to exercise daily.
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
By running the tests.
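A minimal local reproduction sketch (assuming a vllm-ascend test environment on a matching NPU node; the CI runner additionally sets `VLLM_CI_RUNNER` and related variables):

```python
# Sketch: run the new mtpx nightly cases via pytest, assuming the repo
# root is the working directory and the e2e test deps are installed.
import pytest

pytest.main([
    "-sv",  # stream server logs and show per-case ids
    "tests/e2e/nightly/features/test_mtpx_deepseek_r1_0528_w8a8.py",
])
```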
- vLLM version: v0.11.0
- vLLM main: 83f478bb19
---------
Signed-off-by: jiangyunfan1 <jiangyunfan1@h-partners.com>
@@ -60,6 +60,9 @@ jobs:
       - name: deepseek-r1-w8a8-eplb
         os: linux-aarch64-a3-16
         tests: tests/e2e/nightly/models/test_deepseek_r1_w8a8_eplb.py
+      - name: deepseek-r1-w8a8-mtpx
+        os: linux-aarch64-a3-16
+        tests: tests/e2e/nightly/features/test_mtpx_deepseek_r1_0528_w8a8.py
       - name: qwen2-5-vl-7b
         os: linux-aarch64-a3-4
         tests: tests/e2e/nightly/models/test_qwen2_5_vl_7b.py
tests/e2e/nightly/features/test_mtpx_deepseek_r1_0528_w8a8.py (new file, 138 lines)
@@ -0,0 +1,138 @@
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
+# Copyright 2023 The vLLM team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# This file is a part of the vllm-ascend project.
+#
+import json
+from typing import Any
+
+import openai
+import pytest
+from vllm.utils import get_open_port
+
+from tests.e2e.conftest import RemoteOpenAIServer
+from tools.aisbench import run_aisbench_cases
+
+MODELS = [
+    "vllm-ascend/DeepSeek-R1-0528-W8A8",
+]
+
+MODES = ["mtp2", "mtp3"]
+
+prompts = [
+    "San Francisco is a",
+]
+
+api_keyword_args = {
+    "max_tokens": 10,
+}
+
+aisbench_cases = [{
+    "case_type": "accuracy",
+    "dataset_path": "vllm-ascend/aime2024",
+    "request_conf": "vllm_api_general_chat",
+    "dataset_conf": "aime2024/aime2024_gen_0_shot_chat_prompt",
+    "max_out_len": 32768,
+    "batch_size": 32,
+    "baseline": 80,
+    "threshold": 7
+}]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("model", MODELS)
+@pytest.mark.parametrize("mode", MODES)
+async def test_models(model: str, mode: str) -> None:
+    port = get_open_port()
+    env_dict = {
+        "OMP_NUM_THREADS": "100",
+        "OMP_PROC_BIND": "false",
+        "HCCL_BUFFSIZE": "1024",
+        "VLLM_RPC_TIMEOUT": "3600000",
+        "VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS": "3600000"
+    }
+    additional_config: dict[str, Any] = {
+        "ascend_scheduler_config": {
+            "enabled": False
+        },
+    }
+    speculative_config = {
+        "num_speculative_tokens": 2,
+        "method": "deepseek_mtp"
+    }
+    compilation_config = {
+        "cudagraph_capture_sizes": [56],
+        "cudagraph_mode": "FULL_DECODE_ONLY"
+    }
+    server_args = [
+        "--quantization",
+        "ascend",
+        "--seed",
+        "1024",
+        "--no-enable-prefix-caching",
+        "--data-parallel-size",
+        "2",
+        "--tensor-parallel-size",
+        "8",
+        "--enable-expert-parallel",
+        "--port",
+        str(port),
+        "--max-model-len",
+        "40960",
+        "--max-num-seqs",
+        "14",
+        "--trust-remote-code",
+    ]
+    if mode == "mtp2":
+        server_args.extend(["--max-num-batched-tokens", "4096"])
+        server_args.extend(
+            ["--speculative-config",
+             json.dumps(speculative_config)])
+        server_args.extend(["--gpu-memory-utilization", "0.92"])
+        additional_config["torchair_graph_config"] = {"enabled": True}
+    if mode == "mtp3":
+        env_dict["HCCL_OP_EXPANSION_MODE"] = "AIV"
+        server_args.extend(["--max-num-batched-tokens", "2048"])
+        speculative_config["num_speculative_tokens"] = 3
+        server_args.extend(
+            ["--speculative-config",
+             json.dumps(speculative_config)])
+        server_args.extend(["--gpu-memory-utilization", "0.9"])
+        server_args.extend(
+            ["--compilation-config",
+             json.dumps(compilation_config)])
+        additional_config["torchair_graph_config"] = {"enabled": False}
+    server_args.extend(["--additional-config", json.dumps(additional_config)])
+    request_keyword_args: dict[str, Any] = {
+        **api_keyword_args,
+    }
+    with RemoteOpenAIServer(model,
+                            server_args,
+                            server_port=port,
+                            env_dict=env_dict,
+                            auto_port=False) as server:
+        client = server.get_async_client()
+        batch = await client.completions.create(
+            model=model,
+            prompt=prompts,
+            **request_keyword_args,
+        )
+        choices: list[openai.types.CompletionChoice] = batch.choices
+        assert choices[0].text, "empty response"
+        print(choices)
+        # aisbench test
+        run_aisbench_cases(model,
+                           port,
+                           aisbench_cases,
+                           server_args=server_args)
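The new file parametrizes over both a model list and a mode list: `mtp2` runs DeepSeek MTP with two speculative tokens on the torchair graph, while `mtp3` uses three speculative tokens with torchair disabled and a FULL_DECODE_ONLY aclgraph. A small sketch of how the parametrization expands into nightly cases:

```python
# Sketch: test_models runs once per (mode, model) pair, i.e. two cases
# per nightly run with the lists defined in the test file.
from itertools import product

MODELS = ["vllm-ascend/DeepSeek-R1-0528-W8A8"]
MODES = ["mtp2", "mtp3"]

for mode, model in product(MODES, MODELS):
    print(f"test_models[{mode}-{model}]")  # roughly pytest's default ids
```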
@@ -14,14 +14,13 @@
 # limitations under the License.
 # This file is a part of the vllm-ascend project.
 #
-from typing import Any
 
-import openai
 import pytest
 from vllm.utils import get_open_port
 
 from tests.e2e.conftest import RemoteOpenAIServer
 from tools.aisbench import run_aisbench_cases
+from tools.send_request import send_text_request
 
 MODELS = [
     "vllm-ascend/Qwen3-32B-W8A8",
@@ -30,11 +29,13 @@ MODELS = [
 TENSOR_PARALLELS = [4]
 
 prompts = [
-    "San Francisco is a",
+    "9.11 and 9.8, which is greater?",
 ]
 
 api_keyword_args = {
-    "max_tokens": 10,
+    "chat_template_kwargs": {
+        "enable_thinking": True
+    },
 }
 
 aisbench_cases = [{
@@ -86,21 +87,14 @@ async def test_models(model: str, tp_size: int) -> None:
         "--compilation-config",
         '{"cudagraph_mode":"FULL_DECODE_ONLY", "cudagraph_capture_sizes":[1,8,24,48,60]}'
     ]
-    request_keyword_args: dict[str, Any] = {
-        **api_keyword_args,
-    }
     with RemoteOpenAIServer(model,
                             server_args,
                             server_port=port,
                             env_dict=env_dict,
                             auto_port=False) as server:
-        client = server.get_async_client()
-        batch = await client.completions.create(
-            model=model,
-            prompt=prompts,
-            **request_keyword_args,
-        )
-        choices: list[openai.types.CompletionChoice] = batch.choices
-        assert choices[0].text, "empty response"
+        send_text_request(prompts[0],
+                          model,
+                          server,
+                          request_args=api_keyword_args)
         # aisbench test
         run_aisbench_cases(model, port, aisbench_cases)
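The Qwen3-32B smoke request now goes through the new `send_text_request` helper (added below in `tools/send_request.py`), which posts to `/v1/chat/completions`; presumably the switch is needed because `chat_template_kwargs` is a chat-endpoint option that the plain completions call did not accept. A sketch of the JSON body the helper ends up posting for this test:

```python
# Sketch of the chat-completions payload for the updated Qwen3-32B test
# (values taken from the diff above; this mirrors send_text_request).
payload = {
    "model": "vllm-ascend/Qwen3-32B-W8A8",
    "messages": [{
        "role": "user",
        "content": "9.11 and 9.8, which is greater?",
    }],
    "chat_template_kwargs": {
        "enable_thinking": True
    },
}
```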
@@ -28,8 +28,6 @@ MODELS = [
     "vllm-ascend/DeepSeek-R1-W8A8",
 ]
 
-MODES = ["eplb"]
-
 prompts = [
     "San Francisco is a",
 ]
@@ -38,51 +36,69 @@ api_keyword_args = {
     "max_tokens": 10,
 }
 
-aisbench_gsm8k = [{
+aisbench_cases = [{
     "case_type": "accuracy",
     "dataset_path": "vllm-ascend/gsm8k-lite",
     "request_conf": "vllm_api_general_chat",
     "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
     "max_out_len": 32768,
     "batch_size": 32,
-    "top_k": 20,
     "baseline": 95,
     "threshold": 5
 }]
 
-mode_aisbench = {"eplb": aisbench_gsm8k}
-
 
 @pytest.mark.asyncio
 @pytest.mark.parametrize("model", MODELS)
-@pytest.mark.parametrize("mode", MODES)
-async def test_models(model: str, mode: str) -> None:
+async def test_models(model: str) -> None:
     port = get_open_port()
     env_dict = {
-        "OMP_NUM_THREADS": "10",
+        "OMP_NUM_THREADS": "100",
         "OMP_PROC_BIND": "false",
-        "HCCL_BUFFSIZE": "1024",
-        "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True",
-        "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1"
+        "HCCL_BUFFSIZE": "200",
+        "VLLM_ASCEND_ENABLE_MLAPO": "1",
+        "VLLM_RPC_TIMEOUT": "3600000",
+        "VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS": "3600000",
+        "DISABLE_L2_CACHE": "1",
+        "DYNAMIC_EPLB": "true",
+    }
+    speculative_config = {
+        "num_speculative_tokens": 1,
+        "method": "deepseek_mtp"
+    }
+    compilation_config = {
+        "cudagraph_capture_sizes": [24],
+        "cudagraph_mode": "FULL_DECODE_ONLY"
     }
     additional_config: dict[str, Any] = {
         "ascend_scheduler_config": {
             "enabled": False
         },
+        "torchair_graph_config": {
+            "enabled": True
+        },
+        "enable_shared_expert_dp": False,
+        "multistream_overlap_shared_expert": False,
+        "dynamic_eplb": True,
+        "num_iterations_eplb_update": 14000,
+        "num_wait_worker_iterations": 30,
+        "init_redundancy_expert": 0,
+        "gate_eplb": False
     }
     server_args = [
-        "--quantization", "ascend", "--async-scheduling",
-        "--data-parallel-size", "4", "--tensor-parallel-size", "4",
-        "--enable-expert-parallel", "--port",
-        str(port), "--max-model-len", "40960", "--max-num-batched-tokens",
-        "8192", "--max-num-seqs", "12", "--trust-remote-code",
-        "--gpu-memory-utilization", "0.9"
+        "--quantization", "ascend", "--seed", "1024",
+        "--no-enable-prefix-caching", "--data-parallel-size", "4",
+        "--tensor-parallel-size", "4", "--enable-expert-parallel", "--port",
+        str(port), "--max-model-len", "40000", "--max-num-batched-tokens",
+        "4096", "--max-num-seqs", "12", "--trust-remote-code",
+        "--gpu-memory-utilization", "0.92"
     ]
-    if mode == "eplb":
-        env_dict["DYNAMIC_EPLB"] = "true"
-        additional_config["dynamic_eplb"] = True
-        additional_config["num_iterations_eplb_update"] = 2048
-        additional_config["num_wait_worker_iterations"] = 200
+    server_args.extend(
+        ["--speculative-config",
+         json.dumps(speculative_config)])
+    server_args.extend(
+        ["--compilation-config",
+         json.dumps(compilation_config)])
     server_args.extend(["--additional-config", json.dumps(additional_config)])
     request_keyword_args: dict[str, Any] = {
         **api_keyword_args,
@@ -102,7 +118,6 @@ async def test_models(model: str, mode: str) -> None:
         assert choices[0].text, "empty response"
         print(choices)
         # aisbench test
-        aisbench_cases = mode_aisbench[mode]
         run_aisbench_cases(model,
                            port,
                            aisbench_cases,
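For reference, `--speculative-config`, `--compilation-config`, and `--additional-config` each take a JSON string, which the test builds from the dicts above with `json.dumps`. A minimal sketch of the resulting CLI fragments:

```python
# Sketch: the flag/value pairs produced by the json.dumps calls above.
import json

speculative_config = {"num_speculative_tokens": 1, "method": "deepseek_mtp"}
compilation_config = {
    "cudagraph_capture_sizes": [24],
    "cudagraph_mode": "FULL_DECODE_ONLY",
}

print("--speculative-config", json.dumps(speculative_config))
print("--compilation-config", json.dumps(compilation_config))
```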
@@ -28,8 +28,6 @@ MODELS = [
     "vllm-ascend/Qwen3-235B-A22B-W8A8",
 ]
 
-MODES = ["eplb"]
-
 prompts = [
     "San Francisco is a",
 ]
@@ -38,7 +36,7 @@ api_keyword_args = {
     "max_tokens": 10,
 }
 
-aisbench_gsm8k = [{
+aisbench_cases = [{
     "case_type": "accuracy",
     "dataset_path": "vllm-ascend/gsm8k-lite",
     "request_conf": "vllm_api_general_chat",
@@ -47,17 +45,13 @@ aisbench_gsm8k = [{
     "batch_size": 32,
     "top_k": 20,
     "baseline": 95,
-    "threshold": 5,
-    "topk": 20
+    "threshold": 5
 }]
 
-mode_aisbench = {"eplb": aisbench_gsm8k}
-
 
 @pytest.mark.asyncio
 @pytest.mark.parametrize("model", MODELS)
-@pytest.mark.parametrize("mode", MODES)
-async def test_models(model: str, mode: str) -> None:
+async def test_models(model: str) -> None:
     port = get_open_port()
     env_dict = {
         "OMP_NUM_THREADS": "10",
@@ -71,6 +65,7 @@ async def test_models(model: str, mode: str) -> None:
             "enabled": False
         },
     }
+    compilation_config = {"cudagraph_mode": "FULL_DECODE_ONLY"}
     server_args = [
         "--quantization", "ascend", "--async-scheduling",
         "--data-parallel-size", "4", "--tensor-parallel-size", "4",
@@ -79,11 +74,16 @@ async def test_models(model: str, mode: str) -> None:
         "8192", "--max-num-seqs", "12", "--trust-remote-code",
         "--gpu-memory-utilization", "0.9"
     ]
-    if mode == "eplb":
-        env_dict["DYNAMIC_EPLB"] = "true"
-        additional_config["dynamic_eplb"] = True
-        additional_config["num_iterations_eplb_update"] = 2048
-        additional_config["num_wait_worker_iterations"] = 200
+    env_dict["EXPERT_MAP_RECORD"] = "true"
+    env_dict["DYNAMIC_EPLB"] = "true"
+    additional_config["dynamic_eplb"] = True
+    additional_config["num_iterations_eplb_update"] = 14000
+    additional_config["num_wait_worker_iterations"] = 30
+    additional_config["init_redundancy_expert"] = 0
+    additional_config["gate_eplb"] = False
+    server_args.extend(
+        ["--compilation-config",
+         json.dumps(compilation_config)])
     server_args.extend(["--additional-config", json.dumps(additional_config)])
     request_keyword_args: dict[str, Any] = {
         **api_keyword_args,
@@ -103,7 +103,6 @@ async def test_models(model: str, mode: str) -> None:
         assert choices[0].text, "empty response"
         print(choices)
         # aisbench test
-        aisbench_cases = mode_aisbench[mode]
         run_aisbench_cases(model,
                            port,
                            aisbench_cases,
@@ -14,6 +14,7 @@
 # limitations under the License.
 # This file is a part of the vllm-ascend project.
 #
+import json
 import os
 from typing import Any
 
@@ -44,8 +45,8 @@ api_keyword_args = {
 }
 
 batch_size_dict = {
-    "linux-aarch64-a2-4": 44,
-    "linux-aarch64-a3-4": 46,
+    "linux-aarch64-a2-4": 72,
+    "linux-aarch64-a3-4": 76,
 }
 VLLM_CI_RUNNER = os.getenv("VLLM_CI_RUNNER", "linux-aarch64-a2-4")
 performance_batch_size = batch_size_dict.get(VLLM_CI_RUNNER, 1)
@@ -80,21 +81,32 @@ async def test_models(model: str, mode: str, tp_size: int) -> None:
     port = get_open_port()
     env_dict = {
         "TASK_QUEUE_ENABLE": "1",
-        "OMP_PROC_BIND": "false",
+        "VLLM_ASCEND_ENABLE_DENSE_OPTIMIZE": "1",
         "HCCL_OP_EXPANSION_MODE": "AIV",
-        "PAGED_ATTENTION_MASK_LEN": "5500"
+        "VLLM_ASCEND_ENABLE_FLASHCOMM": "1",
+        "VLLM_ASCEND_ENABLE_PREFETCH_MLP": "1"
+    }
+    compilation_config = {
+        "cudagraph_mode":
+        "FULL_DECODE_ONLY",
+        "cudagraph_capture_sizes":
+        [1, 12, 16, 20, 24, 32, 48, 60, 64, 68, 72, 76, 80]
     }
     server_args = [
         "--quantization", "ascend", "--no-enable-prefix-caching",
         "--tensor-parallel-size",
         str(tp_size), "--port",
-        str(port), "--max-model-len", "36864", "--max-num-batched-tokens",
-        "36864", "--block-size", "128", "--trust-remote-code",
-        "--gpu-memory-utilization", "0.9", "--additional-config",
-        '{"enable_weight_nz_layout":true}'
+        str(port), "--max-model-len", "40960", "--max-num-batched-tokens",
+        "40960", "--block-size", "128", "--trust-remote-code",
+        "--reasoning-parser", "qwen3", "--gpu-memory-utilization", "0.9",
+        "--async-scheduling"
     ]
     if mode == "single":
         server_args.append("--enforce-eager")
+    if mode == "aclgraph":
+        server_args.extend(
+            ["--compilation-config",
+             json.dumps(compilation_config)])
     request_keyword_args: dict[str, Any] = {
         **api_keyword_args,
     }
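The enlarged `cudagraph_capture_sizes` list presumably tracks the new performance batch sizes (72 on a2-4, 76 on a3-4) with headroom up to 80; a quick sanity check under that assumption:

```python
# Sketch: verify each runner's new performance batch size is among the
# aclgraph capture sizes configured above.
capture_sizes = [1, 12, 16, 20, 24, 32, 48, 60, 64, 68, 72, 76, 80]
batch_size_dict = {
    "linux-aarch64-a2-4": 72,
    "linux-aarch64-a3-4": 76,
}
for runner, batch_size in batch_size_dict.items():
    assert batch_size in capture_sizes, f"{runner}: {batch_size} not captured"
```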
@@ -56,9 +56,9 @@ aisbench_cases = [{
     "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
     "request_conf": "vllm_api_stream_chat",
     "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
-    "num_prompts": 176,
+    "num_prompts": 240,
     "max_out_len": 1500,
-    "batch_size": 44,
+    "batch_size": 60,
     "baseline": 1,
     "threshold": 0.97
 }]
@@ -75,9 +75,8 @@ async def test_models(model: str, mode: str, tp_size: int) -> None:
         "OMP_PROC_BIND": "false",
         "HCCL_OP_EXPANSION_MODE": "AIV",
         "VLLM_ASCEND_ENABLE_FLASHCOMM": "1",
-        "VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE": "1",
         "VLLM_ASCEND_ENABLE_DEBSE_OPTIMIZE": "1",
-        "VLLM_ASCEND_ENABLE_PREFETCH": "1"
+        "VLLM_ASCEND_ENABLE_PREFETCH_MLP": "1"
     }
     server_args = [
         "--tensor-parallel-size",
tools/send_request.py (new file, 23 lines)
@@ -0,0 +1,23 @@
+from typing import Any
+
+import requests
+
+data: dict[str, Any] = {
+    "messages": [{
+        "role": "user",
+        "content": "",
+    }],
+}
+
+
+def send_text_request(prompt, model, server, request_args=None):
+    data["messages"][0]["content"] = prompt
+    data["model"] = model
+    url = server.url_for("v1", "chat", "completions")
+    if request_args:
+        data.update(request_args)
+    response = requests.post(url, json=data)
+    print("Status Code:", response.status_code)
+    response_json = response.json()
+    print("Response:", response_json)
+    assert response_json["choices"][0]["message"]["content"], "empty response"
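Hypothetical usage, mirroring the call site in the Qwen3-32B test above (`server` is a started `RemoteOpenAIServer`):

```python
# Sketch only: assumes `server` is a running RemoteOpenAIServer instance.
send_text_request(
    "9.11 and 9.8, which is greater?",  # prompt
    "vllm-ascend/Qwen3-32B-W8A8",  # model
    server,
    request_args={"chat_template_kwargs": {
        "enable_thinking": True
    }},
)
```

Note that the helper mutates the module-level `data` dict, so concurrent callers would race on it; that is fine for this single-request smoke check.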