xc-llm-ascend/tests/e2e/multicard/2-cards/test_qwen3_performance.py
Nengjun Ma 78fad4e348 [Refactor] MLP weight prefetch for consistency with the MoE model's prefetching in terms of code and usage (#6442)
### What this PR does / why we need it?
Refactor MLP weight prefetch for consistency with the MoE model's
prefetching in terms of code and usage.
The environment variables VLLM_ASCEND_ENABLE_PREFETCH_MLP,
VLLM_ASCEND_MLP_DOWN_PREFETCH_SIZE, and
VLLM_ASCEND_MLP_GATE_UP_PREFETCH_SIZE are removed; usage is now as follows:

--additional-config '{"weight_prefetch_config": { "enabled": true,
"prefetch_ratio": {"mlp": { "gate_up": 1.0, "down": 1.0} }}}'

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.14.1
- vLLM main: dc917cceb8

---------

Signed-off-by: leo-pony <nengjunma@outlook.com>
2026-02-04 09:08:18 +08:00


# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any

import openai
import pytest
from vllm.utils.network_utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.vllm_bench import run_vllm_bench_case

MODELS = [
    "Qwen/Qwen3-8B",
]

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}

vllm_bench_cases = {
    "dataset-name": "random",
    "num_prompts": 500,
    "request_rate": 20,
    "random_input_len": 128,
    "max_concurrency": 40,
    "random_output_len": 100,
    "temperature": 0.0,
}
# NOTE: Any changes to the baseline throughput should be approved by team members.
# The original baseline was 1600.0; for reasons not yet identified, the throughput
# decreased to 1514.0.
baseline_throughput = 1514.0  # baseline throughput for Qwen3-8B, measured with num_prompts=500

@pytest.mark.parametrize("model", MODELS)
@pytest.mark.asyncio
async def test_models(model: str) -> None:
    port = get_open_port()
    env_dict = {
        "TASK_QUEUE_ENABLE": "1",
        "HCCL_OP_EXPANSION_MODE": "AIV",
    }
    server_args = [
        "--async-scheduling",
        "--distributed-executor-backend",
        "mp",
        "--tensor-parallel-size",
        "1",
        "--port",
        str(port),
        "--max-model-len",
        "5500",
        "--max-num-batched-tokens",
        "40960",
        "--compilation-config",
        '{"cudagraph_mode": "FULL_DECODE_ONLY"}',
        "--additional-config",
        '{"pa_shape_list":[48,64,72,80],"weight_prefetch_config":{"enabled":true}}',
        "--block-size",
        "128",
        "--trust-remote-code",
        "--gpu-memory-utilization",
        "0.9",
    ]
    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False) as server:
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
        # vllm bench test
        run_vllm_bench_case(model, port, vllm_bench_cases, baseline_throughput)
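
For context, `run_vllm_bench_case` is the project's helper from `tools.vllm_bench`. The sketch below shows only the kind of regression check such a helper presumably ends with, assuming it first runs the benchmark against the live server and parses a measured request throughput (illustrative only, not the actual implementation; `check_throughput` and `tolerance` are hypothetical names):

# Illustrative sketch, NOT the tools.vllm_bench implementation.
def check_throughput(measured: float, baseline: float, tolerance: float = 0.0) -> None:
    # Fail the test if the measured throughput regresses below the
    # approved baseline (optionally minus a relative tolerance).
    floor = baseline * (1.0 - tolerance)
    assert measured >= floor, (
        f"throughput regression: {measured:.1f} < {floor:.1f}")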