xc-llm-ascend/tests/e2e/multicard/long_sequence/test_basic.py
dsxsteven 30778f371b [BugFix] Fix num_pcp_pads Assignment Issues (#5273)
### What this PR does / why we need it?
The variable `self.num_pcp_pads` was incorrectly truncated during
assignment, causing errors in certain scenarios such as PD
disaggregation. This PR resolves the issue.
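
For context, a minimal sketch of this bug class, assuming hypothetical names throughout (`pcp_pads`, the loop, and the pad arithmetic are illustrative only; the actual fix lives in the model-runner code, not in this test file):

```python
# Illustrative sketch only -- not the actual patch. With prefill context
# parallel (PCP), each request's tokens are padded so they split evenly
# across PCP ranks, and the pad counts must be kept per request.

def pcp_pads(num_tokens: int, pcp_size: int) -> int:
    """Pads needed so num_tokens divides evenly across pcp_size ranks."""
    return (-num_tokens) % pcp_size

token_counts = [5, 7, 9]

# Buggy shape: each assignment overwrites (truncates) the earlier values,
# so only the last request's pad count survives.
num_pcp_pads = 0
for n in token_counts:
    num_pcp_pads = pcp_pads(n, pcp_size=4)

# Corrected shape: track the full per-request padding.
num_pcp_pads_per_req = [pcp_pads(n, pcp_size=4) for n in token_counts]
assert num_pcp_pads_per_req == [3, 1, 3]
```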
### Does this PR introduce _any_ user-facing change?
NO
### How was this patch tested?

Co-authored-by: QiuChunshuo <qiuchunshuo@huawei.com>

- vLLM version: release/v0.13.0
- vLLM main:
ad32e3e19c

---------

Signed-off-by: daishixun <dsxsteven@sina.com>
Co-authored-by: weijinqian0 <1184188277@qq.com>
2025-12-25 10:38:09 +08:00

#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
#
"""Compare the short outputs of HF and vLLM when using greedy sampling.
Run `pytest tests/e2e/multicard/test_qwen3_moe.py`.
"""
import os

import pytest
from vllm import SamplingParams

from tests.e2e.conftest import VllmRunner
from vllm_ascend.utils import vllm_version_is

# Enlarge the HCCL communication buffer for the multi-card
# context-parallel runs below.
os.environ["HCCL_BUFFSIZE"] = "768"


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="context parallel is not supported on vLLM 0.12.0.")
def test_models_pcp_dcp_basic():
    """Eager-mode test with both prefill and decode context parallel."""
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    enforce_eager=True,
                    max_model_len=1024,
                    tensor_parallel_size=2,
                    prefill_context_parallel_size=2,
                    decode_context_parallel_size=2,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    block_size=128) as runner:
        runner.model.generate(prompts, sampling_params)

    model = "vllm-ascend/Qwen3-30B-A3B-W8A8"
    with VllmRunner(
            model,
            enforce_eager=True,
            max_model_len=1024,
            tensor_parallel_size=2,
            prefill_context_parallel_size=2,
            decode_context_parallel_size=1,
            enable_expert_parallel=True,
            block_size=128,
            quantization="ascend",
    ) as runner:
        runner.model.generate(prompts, sampling_params)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="context parallel is not supported on vLLM 0.12.0.")
def test_models_pcp_dcp_full_graph():
    """Full-graph (FULL_DECODE_ONLY) test with prefill and decode context parallel."""
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    max_model_len=1024,
                    tensor_parallel_size=2,
                    prefill_context_parallel_size=2,
                    decode_context_parallel_size=2,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    block_size=128,
                    compilation_config={
                        "cudagraph_mode": "FULL_DECODE_ONLY",
                        "cudagraph_capture_sizes": [4, 8, 24, 48, 60]
                    }) as runner:
        runner.model.generate(prompts, sampling_params)

    model = "vllm-ascend/Qwen3-30B-A3B-W8A8"
    with VllmRunner(model,
                    max_model_len=1024,
                    tensor_parallel_size=2,
                    prefill_context_parallel_size=2,
                    decode_context_parallel_size=1,
                    enable_expert_parallel=True,
                    block_size=128,
                    quantization="ascend",
                    compilation_config={
                        "cudagraph_mode": "FULL_DECODE_ONLY",
                        "cudagraph_capture_sizes": [4, 8, 24, 48, 60]
                    }) as runner:
        runner.model.generate(prompts, sampling_params)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="context parallel is not supported on vLLM 0.12.0.")
def test_models_pcp_dcp_piece_wise():
    """Piecewise-graph (default compilation) test with prefill and decode context parallel."""
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    max_model_len=1024,
                    tensor_parallel_size=2,
                    prefill_context_parallel_size=2,
                    decode_context_parallel_size=2,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    block_size=128) as runner:
        runner.model.generate(prompts, sampling_params)

    model = "vllm-ascend/Qwen3-30B-A3B-W8A8"
    with VllmRunner(model,
                    max_model_len=1024,
                    tensor_parallel_size=2,
                    prefill_context_parallel_size=2,
                    decode_context_parallel_size=1,
                    enable_expert_parallel=True,
                    block_size=128,
                    quantization="ascend") as runner:
        runner.model.generate(prompts, sampling_params)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="context parallel is not supported on vLLM 0.12.0.")
def test_pcp_basic():
    """Eager-mode test with prefill context parallel only."""
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    enforce_eager=True,
                    max_model_len=1024,
                    tensor_parallel_size=2,
                    prefill_context_parallel_size=2,
                    decode_context_parallel_size=1,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    block_size=128) as runner:
        runner.model.generate(prompts, sampling_params)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="context parallel is not supported on vLLM 0.12.0.")
def test_pcp_full_graph():
    """Full-graph (FULL_DECODE_ONLY) test with prefill context parallel only."""
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    enforce_eager=False,
                    max_model_len=1024,
                    tensor_parallel_size=2,
                    prefill_context_parallel_size=2,
                    decode_context_parallel_size=1,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    block_size=128,
                    compilation_config={
                        "cudagraph_mode": "FULL_DECODE_ONLY",
                        "cudagraph_capture_sizes": [4, 8, 24, 48, 60]
                    }) as runner:
        runner.model.generate(prompts, sampling_params)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="context parallel is not supported on vLLM 0.12.0.")
def test_pcp_piece_wise():
    """Piecewise-graph (default compilation) test with prefill context parallel only."""
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    enforce_eager=False,
                    max_model_len=1024,
                    tensor_parallel_size=2,
                    prefill_context_parallel_size=2,
                    decode_context_parallel_size=1,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    block_size=128) as runner:
        runner.model.generate(prompts, sampling_params)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="context parallel is not supported on vLLM 0.12.0.")
def test_dcp_basic():
    """Eager-mode test with decode context parallel only."""
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    enforce_eager=True,
                    max_model_len=1024,
                    tensor_parallel_size=4,
                    prefill_context_parallel_size=1,
                    decode_context_parallel_size=2,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    block_size=128) as runner:
        runner.model.generate(prompts, sampling_params)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="context parallel is not supported on vLLM 0.12.0.")
def test_dcp_full_graph():
    """Full-graph (FULL_DECODE_ONLY) test with decode context parallel only."""
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    enforce_eager=False,
                    max_model_len=1024,
                    tensor_parallel_size=4,
                    prefill_context_parallel_size=1,
                    decode_context_parallel_size=2,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    block_size=128,
                    compilation_config={
                        "cudagraph_mode": "FULL_DECODE_ONLY",
                        "cudagraph_capture_sizes": [4, 8, 24, 48, 60]
                    }) as runner:
        runner.model.generate(prompts, sampling_params)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="context parallel is not supported on vLLM 0.12.0.")
def test_dcp_piece_wise():
    """Piecewise-graph (default compilation) test with decode context parallel only."""
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    enforce_eager=False,
                    max_model_len=1024,
                    tensor_parallel_size=4,
                    prefill_context_parallel_size=1,
                    decode_context_parallel_size=2,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    block_size=128) as runner:
        runner.model.generate(prompts, sampling_params)