### What this PR does / why we need it?
1. Fix the `TypeError` raised because [Refactor `check_and_update_config`](https://github.com/vllm-project/vllm/pull/35122) removed a `get_attn_backend()` parameter.
2. Adapt to [Rename `compile_ranges_split_points` to `compile_ranges_endpoints`](https://github.com/vllm-project/vllm/pull/36027); the rename is sketched below.
3. Fix `RuntimeError: device_allocator not a DeviceAllocator` caused by [Replace memory related torch.cuda APIs](https://github.com/vllm-project/vllm/pull/37031).
4. Adapt to [Support multiple KV groups in OffloadingSpec](https://github.com/vllm-project/vllm/pull/36610), which removed `self.offloaded_block_size`, changed `self.gpu_block_size` from a scalar to a tuple of per-group block sizes, and added `block_size_factor`; see the sketch below.
5. Adapt to [Consolidate SupportsEagle](https://github.com/vllm-project/vllm/pull/36063), which renamed `get_eagle3_aux_hidden_state_layers()` to `get_eagle3_default_aux_hidden_state_layers()`; a `supports_eagle3()` guard is now applied before calling it, as sketched below.
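
To illustrate item 2, a hedged before/after of the rename. Only the two field names come from the upstream PR; wrapping them in a compilation-config dict is an assumed usage context:

```python
import json

# Field name change only; the surrounding dict and values are hypothetical.
old = {"compile_ranges_split_points": [1024, 4096]}
new = {"compile_ranges_endpoints": old["compile_ranges_split_points"]}
print(json.dumps(new))
```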
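
For item 4, a minimal runnable sketch of the shape change. Only the attribute names and the scalar-to-tuple change come from the upstream PR; the stand-in class and all concrete sizes are made up:

```python
from dataclasses import dataclass


# Hypothetical stand-in for the spec object; not vLLM's real OffloadingSpec.
@dataclass
class OffloadingSpecShape:
    gpu_block_size: tuple[int, ...]  # was a scalar before vllm#36610
    block_size_factor: int           # new; offloaded_block_size was removed


spec = OffloadingSpecShape(gpu_block_size=(64, 64), block_size_factor=4)
# Per-group sizes are now indexed, e.g. spec.gpu_block_size[group_idx].
print(spec.gpu_block_size[0] * spec.block_size_factor)
```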
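
For item 5, a sketch of the guarded call pattern this PR adapts to. The `supports_eagle3()` guard and the renamed accessor come from the upstream PR; the import path and the `default_aux_layers` helper are assumptions for illustration:

```python
from vllm.model_executor.models.interfaces import supports_eagle3


def default_aux_layers(model):
    # Only Eagle3-capable models expose the renamed accessor; an unguarded
    # call would fail on models without Eagle3 support.
    if supports_eagle3(model):
        return model.get_eagle3_default_aux_hidden_state_layers()
    return None
```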
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
E2E tests (see the test module below).
- vLLM version: v0.17.0
- vLLM main: 8a680463fa
---------
Signed-off-by: leo-pony <nengjunma@outlook.com>
Co-authored-by: Claude Code <noreply@anthropic.com>
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
#

import json
import os
from unittest.mock import patch

import openai
import pytest
from vllm.utils.network_utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer, VllmRunner


@patch.dict(os.environ, {"HCCL_BUFFSIZE": "1024"})
def test_qwen3_moe_distributed_mp_tp2_ep():
    """TP2 with expert parallelism on the multiprocessing executor backend."""
    example_prompts = [
        "Hello, my name is",
    ]
    max_tokens = 5
    with VllmRunner(
        "Qwen/Qwen3-30B-A3B",
        tensor_parallel_size=2,
        enable_expert_parallel=True,
        cudagraph_capture_sizes=[1, 2, 4, 8],
        distributed_executor_backend="mp",
    ) as vllm_model:
        vllm_model.generate_greedy(example_prompts, max_tokens)


def test_qwen3_moe_w8a8_distributed_tp2():
    """TP2 with Ascend W8A8 quantization."""
    example_prompts = [
        "Hello, my name is",
    ]
    max_tokens = 5
    with VllmRunner(
        "vllm-ascend/Qwen3-30B-A3B-W8A8",
        max_model_len=8192,
        tensor_parallel_size=2,
        cudagraph_capture_sizes=[1, 2, 4, 8],
        quantization="ascend",
    ) as vllm_model:
        vllm_model.generate_greedy(example_prompts, max_tokens)


@patch.dict(os.environ, {"HCCL_OP_EXPANSION_MODE": "AIV"})
def test_qwen3_moe_distributed_aiv_tp2():
    """TP2 with the HCCL AIV operator expansion mode enabled."""
    # patch.dict scopes the env var to this test instead of mutating
    # os.environ globally, so it cannot leak into later tests.
    example_prompts = [
        "Hello, my name is",
    ]
    dtype = "auto"
    max_tokens = 5
    with VllmRunner(
        "Qwen/Qwen3-30B-A3B",
        dtype=dtype,
        tensor_parallel_size=2,
        cudagraph_capture_sizes=[1, 2, 4, 8],
    ) as vllm_model:
        vllm_model.generate_greedy(example_prompts, max_tokens)


@pytest.mark.asyncio
async def test_qwen3_moe_w8a8_distributed_tp2_ep_dynamic_eplb():
    """Greedy output with dynamic EPLB enabled must match the baseline server."""
    from vllm_ascend.utils import vllm_version_is

    if not vllm_version_is("0.17.0"):
        pytest.skip(
            "EPLB output is different without EPLB, see issue: https://github.com/vllm-project/vllm-ascend/issues/7408",
        )
    model = "vllm-ascend/Qwen3-30B-A3B-W8A8"
    port = get_open_port()
    compilation_config = json.dumps({"cudagraph_capture_sizes": [8]})
    server_args = [
        "--max_model_len",
        "8192",
        "--tensor_parallel_size",
        "2",
        "--enable_expert_parallel",
        "--quantization",
        "ascend",
        "--port",
        str(port),
        "--compilation-config",
        compilation_config,
    ]
    env_dict = {"HCCL_BUFFSIZE": "1024"}
    # Baseline: collect greedy completions from a server without dynamic EPLB.
    with RemoteOpenAIServer(model, server_args, server_port=port, auto_port=False, env_dict=env_dict) as server:
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model, prompt="What is deeplearning?", max_tokens=400, temperature=0, top_p=1.0, n=1
        )
        gt_choices: list[openai.types.CompletionChoice] = batch.choices

    # Dynamic EPLB test.
    # Since pytest runs as a daemon, it conflicts with the dynamic EPLB manager
    # during initialization in offline mode, so the online mode is used instead.
    env_dict.update({"DYNAMIC_EPLB": "true"})
    additional_config = {
        "eplb_config": {
            "dynamic_eplb": True,
            "expert_heat_collection_interval": 100,
            "algorithm_execution_interval": 20,
            "num_redundant_experts": 2,
        }
    }
    server_args.extend(["--additional-config", json.dumps(additional_config)])
    with RemoteOpenAIServer(model, server_args, server_port=port, auto_port=False, env_dict=env_dict) as server:
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model, prompt="What is deeplearning?", max_tokens=400, temperature=0, top_p=1.0, n=1
        )
        eplb_choices: list[openai.types.CompletionChoice] = batch.choices
    assert gt_choices[0].text == eplb_choices[0].text, f"{gt_choices[0].text=} \n {eplb_choices[0].text=}"
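

if __name__ == "__main__":
    # Hedged editorial addition, not part of the original patch: pytest.main()
    # is pytest's standard programmatic entry point, so the module can also be
    # run directly with "python <this file>"; "-s" disables output capture so
    # server logs stream through.
    raise SystemExit(pytest.main(["-s", __file__]))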