[CI] cleanup single/multi-card test (#5623)
1. Speed up the e2e light test.
2. Create `2-cards` and `4-cards` folders under the multicard tests.
3. Move ops tests to the nightly run.
4. Run tests in alphabetical order.
- vLLM version: v0.13.0
- vLLM main: 8be6432bda
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
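A note on item 4: pytest already collects test files alphabetically within a directory, so the folder split alone yields a deterministic order. As a hedged illustration (not part of this commit), the same ordering could also be enforced explicitly with the standard conftest.py hook:

def pytest_collection_modifyitems(session, config, items):
    # Sort collected tests by node id so execution order is deterministic.
    items.sort(key=lambda item: item.nodeid)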
tests/e2e/multicard/4-cards/long_sequence/test_accuracy.py (new file, 212 lines)
@@ -0,0 +1,212 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Compare the outputs of vLLM with and without context parallel.

Run `pytest tests/e2e/multicard/4-cards/long_sequence/test_accuracy.py`.
"""

import pytest

from tests.e2e.conftest import VllmRunner
from tests.e2e.model_utils import check_outputs_equal

MODELS = [
    "Qwen/Qwen3-8B",
    "vllm-ascend/DeepSeek-V2-Lite-W8A8",
]


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("max_tokens", [10])
def test_models_long_sequence_output_between_tp_and_cp(
    model: str,
    max_tokens: int,
) -> None:
    prompts = [
        "The president of the United States is", "The capital of France is"
    ]

    common_kwargs = {
        "max_model_len": 1024,
    }

    if model == "vllm-ascend/DeepSeek-V2-Lite-W8A8":
        cp_kwargs = {
            "tensor_parallel_size": 2,
            "decode_context_parallel_size": 2,
            "prefill_context_parallel_size": 2,
            "enable_expert_parallel": True,
            "enforce_eager": True,
            "quantization": "ascend",
        }
        tp_kwargs = {
            "tensor_parallel_size": 4,
            "enable_expert_parallel": True,
            "enforce_eager": True,
            "quantization": "ascend",
        }
    else:
        cp_kwargs = {
            "tensor_parallel_size": 1,
            "decode_context_parallel_size": 1,
            "prefill_context_parallel_size": 2,
            "compilation_config": {
                "cudagraph_mode": "FULL_DECODE_ONLY",
                "cudagraph_capture_sizes": [4, 8, 24, 48, 60]
            },
        }
        tp_kwargs = {
            "tensor_parallel_size": 2,
            "enforce_eager": True,
        }

    cp_full_kwargs = {}
    cp_full_kwargs.update(common_kwargs)  # type: ignore
    cp_full_kwargs.update(cp_kwargs)  # type: ignore

    tp_full_kwargs = {}
    tp_full_kwargs.update(common_kwargs)  # type: ignore
    tp_full_kwargs.update(tp_kwargs)  # type: ignore

    with VllmRunner(model, **cp_full_kwargs) as runner:  # type: ignore
        vllm_context_parallel_outputs = runner.generate_greedy(
            prompts, max_tokens)

    with VllmRunner(model, **tp_full_kwargs) as runner:  # type: ignore
        vllm_eager_outputs = runner.generate_greedy(prompts, max_tokens)

    check_outputs_equal(
        outputs_0_lst=vllm_eager_outputs,
        outputs_1_lst=vllm_context_parallel_outputs,
        name_0="vllm_eager_outputs",
        name_1="vllm_context_parallel_outputs",
    )


model = "vllm-ascend/DeepSeek-V2-Lite-W8A8"


@pytest.mark.parametrize("max_tokens", [10])
def test_accuracy_dcp_only_graph(max_tokens: int) -> None:
    prompts = [
        "The president of the United States is", "The capital of France is"
    ]
    cp_kwargs = {
        "tensor_parallel_size": 2,
        "decode_context_parallel_size": 2,
        "prefill_context_parallel_size": 1,
        "enable_expert_parallel": True,
        "compilation_config": {
            "cudagraph_mode": "FULL_DECODE_ONLY",
            "cudagraph_capture_sizes": [4, 8, 24, 48, 60]
        },
        "quantization": "ascend",
        "max_model_len": 1024,
    }
    tp_kwargs = {
        "tensor_parallel_size": 4,
        "enable_expert_parallel": True,
        "enforce_eager": True,
        "quantization": "ascend",
        "max_model_len": 1024,
    }
    with VllmRunner(model, **cp_kwargs) as runner:  # type: ignore
        vllm_context_parallel_outputs = runner.generate_greedy(
            prompts, max_tokens)

    with VllmRunner(model, **tp_kwargs) as runner:  # type: ignore
        vllm_eager_outputs = runner.generate_greedy(prompts, max_tokens)

    check_outputs_equal(
        outputs_0_lst=vllm_eager_outputs,
        outputs_1_lst=vllm_context_parallel_outputs,
        name_0="vllm_eager_outputs",
        name_1="vllm_dcp_only_graph_outputs",
    )


@pytest.mark.parametrize("max_tokens", [10])
def test_accuracy_dcp_only_eager(max_tokens: int) -> None:
    prompts = [
        "The president of the United States is", "The capital of France is"
    ]
    cp_kwargs = {
        "tensor_parallel_size": 2,
        "decode_context_parallel_size": 2,
        "prefill_context_parallel_size": 1,
        "enable_expert_parallel": True,
        "enforce_eager": True,
        "quantization": "ascend",
        "max_model_len": 1024,
    }
    tp_kwargs = {
        "tensor_parallel_size": 4,
        "enable_expert_parallel": True,
        "enforce_eager": True,
        "quantization": "ascend",
        "max_model_len": 1024,
    }
    with VllmRunner(model, **cp_kwargs) as runner:  # type: ignore
        vllm_context_parallel_outputs = runner.generate_greedy(
            prompts, max_tokens)

    with VllmRunner(model, **tp_kwargs) as runner:  # type: ignore
        vllm_eager_outputs = runner.generate_greedy(prompts, max_tokens)

    check_outputs_equal(
        outputs_0_lst=vllm_eager_outputs,
        outputs_1_lst=vllm_context_parallel_outputs,
        name_0="vllm_eager_outputs",
        name_1="vllm_dcp_only_eager_outputs",
    )


@pytest.mark.parametrize("max_tokens", [10])
def test_accuracy_pcp_only(max_tokens: int) -> None:
    prompts = [
        "The president of the United States is", "The capital of France is"
    ]
    cp_kwargs = {
        "tensor_parallel_size": 2,
        "decode_context_parallel_size": 1,
        "prefill_context_parallel_size": 2,
        "enable_expert_parallel": True,
        "enforce_eager": True,
        "quantization": "ascend",
        "max_model_len": 1024,
    }
    tp_kwargs = {
        "tensor_parallel_size": 4,
        "enable_expert_parallel": True,
        "enforce_eager": True,
        "quantization": "ascend",
        "max_model_len": 1024,
    }
    with VllmRunner(model, **cp_kwargs) as runner:  # type: ignore
        vllm_context_parallel_outputs = runner.generate_greedy(
            prompts, max_tokens)

    with VllmRunner(model, **tp_kwargs) as runner:  # type: ignore
        vllm_eager_outputs = runner.generate_greedy(prompts, max_tokens)

    check_outputs_equal(
        outputs_0_lst=vllm_eager_outputs,
        outputs_1_lst=vllm_context_parallel_outputs,
        name_0="vllm_eager_outputs",
        name_1="vllm_pcp_only_outputs",
    )
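For reference, a minimal sketch of what a comparison helper like `check_outputs_equal` typically does; the real implementation lives in tests/e2e/model_utils and may differ:

def check_outputs_equal(*, outputs_0_lst, outputs_1_lst, name_0, name_1):
    # Each output is assumed to be a (token_ids, text) pair from
    # generate_greedy; greedy decoding is deterministic, so the token ids
    # from the two configurations must match exactly.
    assert len(outputs_0_lst) == len(outputs_1_lst)
    for i, (out_0, out_1) in enumerate(zip(outputs_0_lst, outputs_1_lst)):
        ids_0, text_0 = out_0
        ids_1, text_1 = out_1
        assert ids_0 == ids_1, (
            f"prompt {i}: {name_0}={text_0!r} != {name_1}={text_1!r}")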
tests/e2e/multicard/4-cards/long_sequence/test_basic.py (new file, 248 lines)
@@ -0,0 +1,248 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
#
import os

from vllm import SamplingParams

from tests.e2e.conftest import VllmRunner

os.environ["HCCL_BUFFSIZE"] = "768"


def test_models_pcp_dcp_basic():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    enforce_eager=True,
                    max_model_len=1024,
                    tensor_parallel_size=2,
                    prefill_context_parallel_size=2,
                    decode_context_parallel_size=2,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    block_size=128) as runner:
        runner.model.generate(prompts, sampling_params)

    model = "vllm-ascend/Qwen3-30B-A3B-W8A8"
    with VllmRunner(
            model,
            enforce_eager=True,
            max_model_len=1024,
            tensor_parallel_size=2,
            prefill_context_parallel_size=2,
            decode_context_parallel_size=1,
            enable_expert_parallel=True,
            block_size=128,
            quantization="ascend",
    ) as runner:
        runner.model.generate(prompts, sampling_params)


def test_models_pcp_dcp_full_graph():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    max_model_len=1024,
                    tensor_parallel_size=2,
                    prefill_context_parallel_size=2,
                    decode_context_parallel_size=2,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    block_size=128,
                    compilation_config={
                        "cudagraph_mode": "FULL_DECODE_ONLY",
                        "cudagraph_capture_sizes": [4, 8, 24, 48, 60]
                    }) as runner:
        runner.model.generate(prompts, sampling_params)

    model = "vllm-ascend/Qwen3-30B-A3B-W8A8"
    with VllmRunner(model,
                    max_model_len=1024,
                    tensor_parallel_size=2,
                    prefill_context_parallel_size=2,
                    decode_context_parallel_size=1,
                    enable_expert_parallel=True,
                    block_size=128,
                    quantization="ascend",
                    compilation_config={
                        "cudagraph_mode": "FULL_DECODE_ONLY",
                        "cudagraph_capture_sizes": [4, 8, 24, 48, 60]
                    }) as runner:
        runner.model.generate(prompts, sampling_params)


def test_models_pcp_dcp_piece_wise():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    max_model_len=1024,
                    tensor_parallel_size=2,
                    prefill_context_parallel_size=2,
                    decode_context_parallel_size=2,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    cudagraph_capture_sizes=[1, 2, 4, 8],
                    block_size=128) as runner:
        runner.model.generate(prompts, sampling_params)

    model = "vllm-ascend/Qwen3-30B-A3B-W8A8"
    with VllmRunner(model,
                    max_model_len=1024,
                    tensor_parallel_size=2,
                    prefill_context_parallel_size=2,
                    decode_context_parallel_size=1,
                    enable_expert_parallel=True,
                    cudagraph_capture_sizes=[1, 2, 4, 8],
                    block_size=128,
                    quantization="ascend") as runner:
        runner.model.generate(prompts, sampling_params)


def test_pcp_basic():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    enforce_eager=True,
                    max_model_len=1024,
                    tensor_parallel_size=2,
                    prefill_context_parallel_size=2,
                    decode_context_parallel_size=1,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    block_size=128) as runner:
        runner.model.generate(prompts, sampling_params)


def test_pcp_full_graph():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    enforce_eager=False,
                    max_model_len=1024,
                    tensor_parallel_size=2,
                    prefill_context_parallel_size=2,
                    decode_context_parallel_size=1,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    block_size=128,
                    compilation_config={
                        "cudagraph_mode": "FULL_DECODE_ONLY",
                        "cudagraph_capture_sizes": [4, 8, 24, 48, 60]
                    }) as runner:
        runner.model.generate(prompts, sampling_params)


def test_pcp_piece_wise():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    enforce_eager=False,
                    max_model_len=1024,
                    tensor_parallel_size=2,
                    prefill_context_parallel_size=2,
                    decode_context_parallel_size=1,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    block_size=128) as runner:
        runner.model.generate(prompts, sampling_params)


def test_dcp_basic():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    enforce_eager=True,
                    max_model_len=1024,
                    tensor_parallel_size=4,
                    prefill_context_parallel_size=1,
                    decode_context_parallel_size=2,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    block_size=128) as runner:
        runner.model.generate(prompts, sampling_params)


def test_dcp_full_graph():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    enforce_eager=False,
                    max_model_len=1024,
                    tensor_parallel_size=4,
                    prefill_context_parallel_size=1,
                    decode_context_parallel_size=2,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    block_size=128,
                    compilation_config={
                        "cudagraph_mode": "FULL_DECODE_ONLY",
                        "cudagraph_capture_sizes": [4, 8, 24, 48, 60]
                    }) as runner:
        runner.model.generate(prompts, sampling_params)


def test_dcp_piece_wise():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "deepseek-ai/DeepSeek-V2-Lite-Chat"
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
    with VllmRunner(model,
                    enforce_eager=False,
                    max_model_len=1024,
                    tensor_parallel_size=4,
                    prefill_context_parallel_size=1,
                    decode_context_parallel_size=2,
                    max_num_batched_tokens=1024,
                    enable_expert_parallel=True,
                    block_size=128) as runner:
        runner.model.generate(prompts, sampling_params)
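A sizing note for the tests above, under the (hedged) assumption that a config occupies tensor_parallel_size * prefill_context_parallel_size ranks, while decode_context_parallel_size subdivides the TP group and adds no extra ranks; the helper below is hypothetical and not part of this commit:

def required_cards(tp, pcp=1, dcp=1):
    assert tp % dcp == 0  # assumed: DCP subdivides the TP group
    return tp * pcp

assert required_cards(tp=2, pcp=2, dcp=2) == 4  # test_models_pcp_dcp_basic
assert required_cards(tp=4, pcp=1, dcp=2) == 4  # test_dcp_basic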
@@ -0,0 +1,73 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
#
import os
import random
import string
from unittest.mock import patch

from vllm import SamplingParams

from tests.e2e.conftest import VllmRunner


def generate_prompts(input_len, batchsize):
    prompts = [
        " ".join([
            f"{random.choice(string.ascii_letters)}" for _ in range(input_len)
        ]) for _ in range(batchsize)
    ]
    return prompts


@patch.dict(
    os.environ, {
        "HCCL_BUFFSIZE": "768",
        "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1",
        "VLLM_ALLOW_LONG_MAX_MODEL_LEN": "1"
    })
def test_models_chunked_prefill_mixed_length_prompts_including_1_token():
    TEST_ROPE_PARAMETERS = {
        "rope_theta": 1000000,
        "rope_type": "yarn",
        "factor": 4,
        "original_max_position_embeddings": 32768
    }
    prompts = [
        generate_prompts(128 * 1024, 1)[0],
        generate_prompts(1, 1)[0],
        generate_prompts(9104, 1)[0],
    ]
    sampling_params = SamplingParams(max_tokens=1, temperature=0.0)

    model = "vllm-ascend/Qwen3-30B-A3B-W8A8"
    with VllmRunner(
            model,
            enforce_eager=True,
            max_num_seqs=2,
            max_num_batched_tokens=131000,
            max_model_len=132000,
            tensor_parallel_size=2,
            prefill_context_parallel_size=2,
            decode_context_parallel_size=1,
            enable_expert_parallel=True,
            block_size=128,
            quantization="ascend",
            hf_overrides={"rope_parameters": TEST_ROPE_PARAMETERS},
    ) as runner:
        runner.model.generate(prompts, sampling_params)
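The YaRN override in the test above is what lets the 128 * 1024-token prompt fit: the scaled context window is factor * original_max_position_embeddings positions. Worked arithmetic:

# Worked arithmetic for the rope override above:
assert 4 * 32768 == 128 * 1024 == 131072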
tests/e2e/multicard/4-cards/long_sequence/test_mtp.py (new file, 152 lines)
@@ -0,0 +1,152 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
#

import os

from tests.e2e.conftest import VllmRunner

os.environ["HCCL_BUFFSIZE"] = "512"


def test_pcp_dcp_mtp1_eager():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "wemaster/deepseek_mtp_main_random_bf16"
    with VllmRunner(
            model,
            max_model_len=1024,
            tensor_parallel_size=2,
            prefill_context_parallel_size=2,
            decode_context_parallel_size=2,
            max_num_batched_tokens=1024,
            enable_expert_parallel=True,
            block_size=128,
            speculative_config={
                "num_speculative_tokens": 1,
                "method": "deepseek_mtp",
            },
            enforce_eager=True,
    ) as runner:
        runner.generate_greedy(prompts, 32)


def test_pcp_dcp_mtp3_eager():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "wemaster/deepseek_mtp_main_random_bf16"
    with VllmRunner(
            model,
            max_model_len=1024,
            tensor_parallel_size=2,
            prefill_context_parallel_size=2,
            decode_context_parallel_size=2,
            max_num_batched_tokens=1024,
            enable_expert_parallel=True,
            block_size=128,
            speculative_config={
                "num_speculative_tokens": 3,
                "method": "deepseek_mtp",
            },
            enforce_eager=True,
    ) as runner:
        runner.generate_greedy(prompts, 32)


def test_pcp_dcp_mtp3_piecewise_graph():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "wemaster/deepseek_mtp_main_random_bf16"
    with VllmRunner(
            model,
            max_model_len=1024,
            tensor_parallel_size=2,
            prefill_context_parallel_size=2,
            decode_context_parallel_size=2,
            max_num_batched_tokens=1024,
            enable_expert_parallel=True,
            block_size=128,
            speculative_config={
                "num_speculative_tokens": 3,
                "method": "deepseek_mtp",
            },
            compilation_config={
                "cudagraph_mode": "PIECEWISE",
                "cudagraph_capture_sizes": [4, 8, 16],
            },
    ) as runner:
        runner.generate_greedy(prompts, 32)


def test_pcp_dcp_mtp3_full_graph():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "wemaster/deepseek_mtp_main_random_bf16"
    with VllmRunner(
            model,
            max_model_len=1024,
            tensor_parallel_size=2,
            prefill_context_parallel_size=2,
            decode_context_parallel_size=2,
            max_num_batched_tokens=1024,
            enable_expert_parallel=True,
            block_size=128,
            speculative_config={
                "num_speculative_tokens": 3,
                "method": "deepseek_mtp",
            },
            compilation_config={
                "cudagraph_mode": "FULL_DECODE_ONLY",
                "cudagraph_capture_sizes": [4, 8, 16],
            },
    ) as runner:
        runner.generate_greedy(prompts, 32)


def test_dcp_mtp3_full_graph():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",
        "The president of United States is", "AI future is"
    ]
    model = "wemaster/deepseek_mtp_main_random_bf16"
    with VllmRunner(
            model,
            max_model_len=1024,
            tensor_parallel_size=2,
            decode_context_parallel_size=2,
            max_num_batched_tokens=1024,
            enable_expert_parallel=True,
            block_size=128,
            speculative_config={
                "num_speculative_tokens": 3,
                "method": "deepseek_mtp",
            },
            compilation_config={
                "cudagraph_mode": "FULL_DECODE_ONLY",
                "cudagraph_capture_sizes": [4, 8, 16],
            },
    ) as runner:
        runner.generate_greedy(prompts, 32)
tests/e2e/multicard/4-cards/spec_decode/test_mtp_qwen3_next.py (new file, 154 lines)
@@ -0,0 +1,154 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
#
"""Compare the short outputs of HF and vLLM when using greedy sampling.

Run `pytest tests/e2e/multicard/4-cards/spec_decode/test_mtp_qwen3_next.py`.
"""

import os

import pytest
from vllm.config import CompilationConfig
from vllm.v1.metrics.reader import Counter, Vector

from tests.e2e.conftest import VllmRunner, cleanup_dist_env_and_memory

os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"

MODELS = ["Qwen/Qwen3-Next-80B-A3B-Instruct"]


@pytest.mark.parametrize("model_name", MODELS)
def test_qwen3_next_mtp_acceptance_tp4(model_name):
    golden = [0.85, 0.46, 0.19]

    example_prompts = [
        "Hello, my name is",
        "The president of the United States is",
        "The capital of France is",
        "The future of AI is",
    ]

    max_tokens = 1024

    with VllmRunner(model_name,
                    tensor_parallel_size=4,
                    max_model_len=4096,
                    gpu_memory_utilization=0.8,
                    distributed_executor_backend="mp",
                    disable_log_stats=False,
                    speculative_config={
                        "cudagraph_mode": "FULL_DECODE_ONLY",
                        "method": "qwen3_next_mtp",
                        "num_speculative_tokens": 3,
                    },
                    compilation_config=CompilationConfig(
                        cudagraph_capture_sizes=[20])) as spec_vllm_model:
        _ = spec_vllm_model.generate_greedy(example_prompts, max_tokens)
        metrics = spec_vllm_model.model.get_metrics()

    num_drafts = 0
    num_accepted_tokens_per_pos = [0] * 3
    for metric in metrics:
        if metric.name == "vllm:spec_decode_num_drafts":
            assert isinstance(metric, Counter)
            num_drafts += metric.value
        elif metric.name == "vllm:spec_decode_num_accepted_tokens_per_pos":
            assert isinstance(metric, Vector)
            for pos in range(len(metric.values)):
                num_accepted_tokens_per_pos[pos] += metric.values[pos]

    acceptance_per_pos = [
        num_accepted_tokens / num_drafts
        for num_accepted_tokens in num_accepted_tokens_per_pos
    ]

    match = all(abs(a - b) < 0.05 for a, b in zip(acceptance_per_pos, golden))
    if not match:
        print(f"acceptance_per_pos: {acceptance_per_pos}")
        print(f"golden: {golden}")

    assert match
    cleanup_dist_env_and_memory()


# FIXME: When applying `FULL_DECODE_ONLY` in this e2e, CI fails.
# The failure cannot be reproduced locally.
@pytest.mark.parametrize("model_name", MODELS)
@pytest.mark.parametrize("num_speculative_tokens", [1])
@pytest.mark.parametrize("disable_padded_drafter_batch", [True, False])
def test_qwen3_next_mtp_correctness_tp4(model_name: str,
                                        num_speculative_tokens: int,
                                        disable_padded_drafter_batch: bool):
    """The outputs of the original LLM and the speculative LLM should be the
    same when using MTP speculative decoding."""
    example_prompts = [
        "Hello, my name is",
        "The president of the United States is",
        "The capital of France is",
        "The future of AI is",
    ]

    max_tokens = 20

    with VllmRunner(model_name,
                    tensor_parallel_size=4,
                    max_model_len=4096,
                    gpu_memory_utilization=0.8,
                    distributed_executor_backend="mp",
                    speculative_config={
                        "method": "mtp",
                        "num_speculative_tokens": num_speculative_tokens,
                        "disable_padded_drafter_batch":
                        disable_padded_drafter_batch,
                    },
                    compilation_config=CompilationConfig(
                        cudagraph_capture_sizes=[20])) as spec_llm:
        spec_outputs = spec_llm.generate_greedy(example_prompts, max_tokens)
    del spec_llm

    with VllmRunner(model_name,
                    tensor_parallel_size=4,
                    max_model_len=4096,
                    gpu_memory_utilization=0.8,
                    distributed_executor_backend="mp",
                    compilation_config=CompilationConfig(
                        cudagraph_capture_sizes=[20])) as ref_llm:
        ref_outputs = ref_llm.generate_greedy(example_prompts, max_tokens)
    del ref_llm

    matches = 0
    misses = 0
    for ref_output, spec_output in zip(ref_outputs, spec_outputs):
        ref_token_ids = ref_output[0]
        spec_token_ids = spec_output[0]
        if ref_token_ids == spec_token_ids[:len(ref_token_ids)]:
            matches += 1
        else:
            misses += 1
            print(f"ref_output: {ref_output[1]}")
            print(f"spec_output: {spec_output[1]}")

    # Heuristic: expect at least 66% of the prompts to match exactly.
    # Upon failure, inspect the outputs to check for inaccuracy.
    assert matches > int(0.66 * len(ref_outputs))
    cleanup_dist_env_and_memory()
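A worked example of the acceptance arithmetic in test_qwen3_next_mtp_acceptance_tp4 above, with illustrative numbers only:

# If 1000 drafts were proposed and positions 0/1/2 were accepted
# 850/460/190 times, the per-position acceptance rates are:
num_drafts = 1000
num_accepted_tokens_per_pos = [850, 460, 190]
acceptance_per_pos = [n / num_drafts for n in num_accepted_tokens_per_pos]
assert acceptance_per_pos == [0.85, 0.46, 0.19]  # equals `golden` above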
tests/e2e/multicard/4-cards/test_data_parallel_tp2.py (new file, 48 lines)
@@ -0,0 +1,48 @@
import os
import subprocess
import sys
from unittest.mock import patch

import pytest

MODELS = ["Qwen/Qwen3-30B-A3B"]


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("max_tokens", [32])
@patch.dict(os.environ, {"ASCEND_RT_VISIBLE_DEVICES": "0,1,2,3"})
def test_qwen3_inference_dp2_tp2(model, max_tokens):
    script = "examples/offline_data_parallel.py"

    env = os.environ.copy()

    cmd = [
        sys.executable,
        script,
        "--model",
        model,
        "--dp-size",
        "2",
        "--tp-size",
        "2",
        "--node-size",
        "1",
        "--node-rank",
        "0",
        "--trust-remote-code",
    ]

    print(f"Running subprocess: {' '.join(cmd)}")
    proc = subprocess.run(cmd,
                          env=env,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT,
                          timeout=600)
    output = proc.stdout.decode(errors='ignore')

    print(output)

    assert "DP rank 0 needs to process" in output
    assert "DP rank 1 needs to process" in output
    assert "Generated text:" in output
    assert proc.returncode == 0
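Sanity arithmetic for the config above: two data-parallel replicas, each sharded over two tensor-parallel ranks, occupy dp * tp = 4 NPUs, which matches ASCEND_RT_VISIBLE_DEVICES="0,1,2,3":

dp_size, tp_size = 2, 2
assert dp_size * tp_size == 4  # fits the four visible devices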
tests/e2e/multicard/4-cards/test_kimi_k2.py (new file, 44 lines)
@@ -0,0 +1,44 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
#
import os

from tests.e2e.conftest import VllmRunner

os.environ["PYTORCH_NPU_ALLOC_CONF"] = "max_split_size_mb:256"
os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"


def test_kimi_k2_thinking_w4a16_tp4():
    example_prompts = [
        "Hello, my name is",
    ]
    max_tokens = 5

    with VllmRunner(
            "vllm-ascend/Kimi-K2-Thinking-Pruning",
            max_model_len=8192,
            dtype="auto",
            tensor_parallel_size=4,
            enable_expert_parallel=True,
            compilation_config={
                "cudagraph_mode": "FULL_DECODE_ONLY",
                "cudagraph_capture_sizes": [1],
            },
    ) as vllm_model:
        vllm_model.generate_greedy(example_prompts, max_tokens)
tests/e2e/multicard/4-cards/test_qwen3_next.py (new file, 77 lines)
@@ -0,0 +1,77 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
#
import os
from unittest.mock import patch

from modelscope import snapshot_download  # type: ignore

from tests.e2e.conftest import VllmRunner


def test_qwen3_next_distributed_mp_tp4():
    example_prompts = [
        "Hello, my name is",
    ] * 4
    max_tokens = 5
    with VllmRunner("Qwen/Qwen3-Next-80B-A3B-Instruct",
                    tensor_parallel_size=4,
                    cudagraph_capture_sizes=[1, 2, 4, 8],
                    max_model_len=4096,
                    gpu_memory_utilization=0.8,
                    distributed_executor_backend="mp") as vllm_model:
        vllm_model.generate_greedy(example_prompts, max_tokens)
    del vllm_model


def test_qwen3_next_distributed_mp_full_decode_only_tp4():
    example_prompts = [
        "Hello, my name is",
    ] * 4
    max_tokens = 5
    with VllmRunner("Qwen/Qwen3-Next-80B-A3B-Instruct",
                    tensor_parallel_size=4,
                    max_model_len=4096,
                    gpu_memory_utilization=0.8,
                    distributed_executor_backend="mp",
                    compilation_config={
                        "cudagraph_mode": "FULL_DECODE_ONLY",
                        "cudagraph_capture_sizes": [1, 8, 24, 48, 60]
                    }) as vllm_model:
        vllm_model.generate_greedy(example_prompts, max_tokens)
    del vllm_model


# TODO: add accuracy verification once the subsequent version becomes stable.
@patch.dict(os.environ, {"HCCL_BUFFSIZE": "1024"})
def test_qwen3_next_w8a8dynamic_distributed_tp4_ep():
    example_prompts = [
        "Hello, my name is",
    ]
    max_tokens = 5
    with VllmRunner(
            snapshot_download("vllm-ascend/Qwen3-Next-80B-A3B-Instruct-W8A8"),
            max_model_len=4096,
            tensor_parallel_size=4,
            gpu_memory_utilization=0.4,
            max_num_seqs=1,
            enable_expert_parallel=True,
            cudagraph_capture_sizes=[1, 2, 4, 8],
            quantization="ascend",
    ) as vllm_model:
        vllm_model.generate_greedy(example_prompts, max_tokens)