xc-llm-ascend/tests/e2e/multicard/4-cards/long_sequence/test_mtp.py
wjunLu c11a05c4e1 [Main2Main] Upgrade vllm commit to 0113 (#5839)
### What this PR does / why we need it?
Upgrade vllm commit to 0113 (11b6af5280d6d6dfb8953af16e67b25f819b3be9)

- Modify import paths due to the refactors in
https://github.com/vllm-project/vllm/pull/31916
https://github.com/vllm-project/vllm/pull/32054

- Fix `TypeError: NPUOffloadingSpec.__init__() takes 2 positional
arguments but 3 were given` due to
https://github.com/vllm-project/vllm/pull/24498
(a hypothetical sketch of this failure mode follows after this list)

- Skip the async-scheduling tests in
`tests/e2e/multicard/4-cards/long_sequence/test_mtp.py`, which have never
been verified:
https://github.com/vllm-project/vllm/pull/31998

- Skip some pooling tests, whose failures are caused by
https://github.com/vllm-project/vllm/pull/32148
(vLLM's own CI fails there as well):
https://buildkite.com/vllm/ci/builds/46705/steps/canvas?jid=019bb329-3834-4685-862b-1613b8e0f5d4

We will re-enable those tests once main2main reaches
https://github.com/vllm-project/vllm/pull/32243

- Skip some cases in
`tests/e2e/multicard/4-cards/long_sequence/test_mtp.py`, which are
broken by
https://github.com/vllm-project/vllm/pull/32118
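
For context on the `NPUOffloadingSpec` item above: that error is the usual symptom of an upstream call site gaining a positional argument that a subclass's `__init__` does not yet accept. The sketch below is a minimal, hypothetical illustration of the failure mode and its fix; `BaseSpec`, `BrokenNPUSpec`, and `FixedNPUSpec` and their signatures are invented for illustration and are not the actual vLLM or vllm-ascend APIs.

```python
# Hypothetical names and signatures, for illustration only.
class BaseSpec:
    # After an upstream refactor, callers construct specs with an extra
    # positional argument, e.g. Spec(config, extra).
    def __init__(self, config, extra=None):
        self.config = config
        self.extra = extra


class BrokenNPUSpec(BaseSpec):
    def __init__(self, config):  # accepts only (self, config)
        super().__init__(config)


class FixedNPUSpec(BaseSpec):
    def __init__(self, config, extra=None):  # accepts the new argument
        super().__init__(config, extra)


try:
    BrokenNPUSpec("cfg", "extra")
except TypeError as exc:
    # TypeError: BrokenNPUSpec.__init__() takes 2 positional arguments but 3 were given
    print(exc)

FixedNPUSpec("cfg", "extra")  # works once the signature matches the call site
```

The cases broken by PR-32118 are handled with the `vllm_version_is('0.13.0')`-gated `pytest.mark.skipif` decorators visible in the test file below.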

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.13.0
- vLLM main: 2f4e6548ef

Signed-off-by: wjunLu <wjunlu217@gmail.com>
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
Co-authored-by: hfadzxy <starmoon_zhang@163.com>
2026-01-15 09:48:53 +08:00


#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
#
import os

import pytest

from tests.e2e.conftest import VllmRunner
from vllm_ascend.utils import vllm_version_is

os.environ["HCCL_BUFFSIZE"] = "512"

def test_pcp_dcp_mtp1_eager():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "wemaster/deepseek_mtp_main_random_bf16"
    with VllmRunner(
            model,
            max_model_len=1024,
            tensor_parallel_size=2,
            prefill_context_parallel_size=2,
            decode_context_parallel_size=2,
            max_num_batched_tokens=1024,
            enable_expert_parallel=True,
            block_size=128,
            speculative_config={
                "num_speculative_tokens": 1,
                "method": "deepseek_mtp",
            },
            enforce_eager=True,
            async_scheduling=False,
    ) as runner:
        runner.generate_greedy(prompts, 32)

@pytest.mark.skipif(
    not vllm_version_is('0.13.0'),
    reason="vLLM PR-32118 breaks this",
)
def test_pcp_dcp_mtp3_eager():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "wemaster/deepseek_mtp_main_random_bf16"
    with VllmRunner(
            model,
            max_model_len=1024,
            tensor_parallel_size=2,
            prefill_context_parallel_size=2,
            decode_context_parallel_size=2,
            max_num_batched_tokens=1024,
            enable_expert_parallel=True,
            block_size=128,
            speculative_config={
                "num_speculative_tokens": 3,
                "method": "deepseek_mtp",
            },
            enforce_eager=True,
            async_scheduling=False,
    ) as runner:
        runner.generate_greedy(prompts, 32)

@pytest.mark.skipif(
    not vllm_version_is('0.13.0'),
    reason="vLLM PR-32118 breaks this",
)
def test_pcp_dcp_mtp3_piecewise_graph():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "wemaster/deepseek_mtp_main_random_bf16"
    with VllmRunner(
            model,
            max_model_len=1024,
            tensor_parallel_size=2,
            prefill_context_parallel_size=2,
            decode_context_parallel_size=2,
            max_num_batched_tokens=1024,
            enable_expert_parallel=True,
            block_size=128,
            speculative_config={
                "num_speculative_tokens": 3,
                "method": "deepseek_mtp",
            },
            compilation_config={
                "cudagraph_mode": "PIECEWISE",
                "cudagraph_capture_sizes": [4, 8, 16],
            },
            async_scheduling=False,
    ) as runner:
        runner.generate_greedy(prompts, 32)

@pytest.mark.skipif(
    not vllm_version_is('0.13.0'),
    reason="vLLM PR-32118 breaks this",
)
def test_pcp_dcp_mtp3_full_graph():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "wemaster/deepseek_mtp_main_random_bf16"
    with VllmRunner(
            model,
            max_model_len=1024,
            tensor_parallel_size=2,
            prefill_context_parallel_size=2,
            decode_context_parallel_size=2,
            max_num_batched_tokens=1024,
            enable_expert_parallel=True,
            block_size=128,
            speculative_config={
                "num_speculative_tokens": 3,
                "method": "deepseek_mtp",
            },
            compilation_config={
                "cudagraph_mode": "FULL_DECODE_ONLY",
                "cudagraph_capture_sizes": [4, 8, 16],
            },
            async_scheduling=False,
    ) as runner:
        runner.generate_greedy(prompts, 32)

def test_dcp_mtp3_full_graph():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "wemaster/deepseek_mtp_main_random_bf16"
    with VllmRunner(
            model,
            max_model_len=1024,
            tensor_parallel_size=2,
            decode_context_parallel_size=2,
            max_num_batched_tokens=1024,
            enable_expert_parallel=True,
            block_size=128,
            speculative_config={
                "num_speculative_tokens": 3,
                "method": "deepseek_mtp",
            },
            compilation_config={
                "cudagraph_mode": "FULL_DECODE_ONLY",
                "cudagraph_capture_sizes": [4, 8, 16],
            },
            async_scheduling=False,
    ) as runner:
        runner.generate_greedy(prompts, 32)