### What this PR does / why we need it?
Related to vLLM PR #34043, which deleted the function
`relax_for_mixed_batch_cudagraphs`. After that change, `num_reqs` no longer
equals the actual number of requests. The IFA operator requires that
`query_start_loc[-1]` equal the total number of computed tokens, so deleting
this function causes an IFA error.
In full graph mode, set `num_reqs_padded = num_reqs` to fix the error.
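
A minimal sketch of the idea, assuming hypothetical names
(`resolve_num_reqs_padded`, `graph_padded_num_reqs`); the actual change lives
in vllm-ascend's attention-metadata building and differs in detail:

```python
# Illustrative sketch only: the names below are invented and do not
# match the real vllm-ascend code.
def resolve_num_reqs_padded(num_reqs: int, graph_padded_num_reqs: int,
                            full_graph: bool) -> int:
    """Pick the request count used to build query_start_loc."""
    if full_graph:
        # The IFA operator requires query_start_loc[-1] to equal the
        # total number of computed tokens; padding the request count
        # would violate that, so keep the real request count.
        return num_reqs
    # Other graph modes may keep the graph-padded request count.
    return graph_padded_num_reqs
```
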
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
Added the two e2e tests shown below (`test_qwen3_moe_full_decode_only_tp2`
and `test_qwen3_moe_full_graph_tp2`), which compare full-graph outputs
against a baseline run on the same prompts.
- vLLM version: v0.16.0
- vLLM main: 4034c3d32e
---------
Signed-off-by: wangxiaoxin-sherie <wangxiaoxin7@huawei.com>
Co-authored-by: wangxiaoxin-sherie <wangxiaoxin7@huawei.com>
```python
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
#
import os

from vllm import SamplingParams

from tests.e2e.conftest import VllmRunner
from tests.e2e.model_utils import check_outputs_equal


def test_qwen3_moe_full_decode_only_tp2():
    # Clear HCCL_OP_EXPANSION_MODE if set so it does not affect the run.
    if "HCCL_OP_EXPANSION_MODE" in os.environ:
        del os.environ["HCCL_OP_EXPANSION_MODE"]
    prompts = [
        "Hello, my name is",
        "The president of the United States is",
        "The capital of France is",
        "The future of AI is",
    ]
    model = "Qwen/Qwen3-30B-A3B"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    # Full-graph run: FULL_DECODE_ONLY captures full graphs for
    # decode-only batches.
    with VllmRunner(
            model,
            max_model_len=1024,
            tensor_parallel_size=2,
            compilation_config={
                "cudagraph_mode": "FULL_DECODE_ONLY",
                "cudagraph_capture_sizes": [4, 8, 24, 48, 60],
            },
    ) as runner:
        vllm_fullgraph_outputs = runner.model.generate(
            prompts, sampling_params)

    # Baseline run without the full-graph compilation config.
    with VllmRunner(
            model,
            max_model_len=1024,
            cudagraph_capture_sizes=[4, 8, 24, 48, 60],
            tensor_parallel_size=2,
    ) as runner:
        vllm_eager_outputs = runner.model.generate(prompts, sampling_params)

    vllm_fullgraph_outputs_list = []
    for output in vllm_fullgraph_outputs:
        vllm_fullgraph_outputs_list.append(
            (output.outputs[0].index, output.outputs[0].text))

    vllm_eager_outputs_list = []
    for output in vllm_eager_outputs:
        vllm_eager_outputs_list.append(
            (output.outputs[0].index, output.outputs[0].text))

    # With greedy sampling (temperature=0.0) the two runs are expected
    # to produce identical outputs.
    check_outputs_equal(
        outputs_0_lst=vllm_eager_outputs_list,
        outputs_1_lst=vllm_fullgraph_outputs_list,
        name_0="vllm_eager_outputs",
        name_1="vllm_fullgraph_outputs",
    )


def test_qwen3_moe_full_graph_tp2():
    # Clear HCCL_OP_EXPANSION_MODE if set so it does not affect the run.
    if "HCCL_OP_EXPANSION_MODE" in os.environ:
        del os.environ["HCCL_OP_EXPANSION_MODE"]
    prompts = [
        "Hello, my name is",
        "The president of the United States is",
        "The capital of France is",
        "The future of AI is",
    ]
    model = "Qwen/Qwen3-30B-A3B"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    # Full-graph run: FULL captures full graphs for all batch types,
    # not only decode.
    with VllmRunner(
            model,
            max_model_len=1024,
            tensor_parallel_size=2,
            compilation_config={
                "cudagraph_mode": "FULL",
                "cudagraph_capture_sizes": [4, 8, 24, 48, 60],
            },
    ) as runner:
        vllm_fullgraph_outputs = runner.model.generate(
            prompts, sampling_params)

    # Baseline run without the full-graph compilation config.
    with VllmRunner(
            model,
            max_model_len=1024,
            cudagraph_capture_sizes=[4, 8, 24, 48, 60],
            tensor_parallel_size=2,
    ) as runner:
        vllm_eager_outputs = runner.model.generate(prompts, sampling_params)

    vllm_fullgraph_outputs_list = []
    for output in vllm_fullgraph_outputs:
        vllm_fullgraph_outputs_list.append(
            (output.outputs[0].index, output.outputs[0].text))

    vllm_eager_outputs_list = []
    for output in vllm_eager_outputs:
        vllm_eager_outputs_list.append(
            (output.outputs[0].index, output.outputs[0].text))

    # With greedy sampling (temperature=0.0) the two runs are expected
    # to produce identical outputs.
    check_outputs_equal(
        outputs_0_lst=vllm_eager_outputs_list,
        outputs_1_lst=vllm_fullgraph_outputs_list,
        name_0="vllm_eager_outputs",
        name_1="vllm_fullgraph_outputs",
    )
```