Files
xc-llm-ascend/tests/e2e/multicard/test_shared_expert_dp.py
zhangxinyuehfad bfafe30953 [CI] refactor e2e tests (#4799)
### What this PR does / why we need it?
This PR updates the CI configuration and adjusts a set of end-to-end
(e2e) tests under tests/e2e/multicard, refactoring the test suite and
keeping it compatible with the current codebase and CI workflows.

1. tests/e2e/multicard/test_prefix_caching.py: change the model to Qwen3-8B
and rename the test case
2. tests/e2e/multicard/test_quantization.py: rename the test case
3. tests/e2e/multicard/test_qwen3_moe.py: remove a duplicate test and
rename the test cases
4. tests/e2e/multicard/test_qwen3_next.py: rename the test cases, replace
the W8A8 pruning model with the W8A8 model, and remove the eager parameter
5. tests/e2e/multicard/test_shared_expert_dp.py: rename the test case and
remove the eager parameter (see the sketch after this list)
6. tests/e2e/multicard/test_single_request_aclgraph.py: rename the test case
and change Qwen3-30B to Qwen3-0.6B
7. tests/e2e/multicard/test_torchair_graph_mode.py: delete the torchair
test cases
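
As a rough illustration of items 4 and 5, "removing the eager parameter" means dropping the explicit enforce_eager=True argument from the VllmRunner call so the runner falls back to its default execution mode. The following is a minimal sketch only, not the actual diff; the model name and keyword arguments are assumptions:

import pytest
from tests.e2e.conftest import VllmRunner

MODELS = ["Qwen/Qwen3-0.6B"]  # placeholder model, not necessarily the one used in CI


@pytest.mark.parametrize("model", MODELS)
def test_models_default_execution_mode(model: str) -> None:
    # Before the refactor the runner was pinned to eager mode:
    #     VllmRunner(model, enforce_eager=True, tensor_parallel_size=2)
    # After it, the argument is dropped and the default mode applies.
    with VllmRunner(model, tensor_parallel_size=2) as runner:
        runner.model.generate(["Hello, my name is"])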

- vLLM version: v0.12.0
- vLLM main: ad32e3e19c

Signed-off-by: hfadzxy <starmoon_zhang@163.com>
2025-12-12 08:42:08 +08:00

import os

import pytest
from vllm import SamplingParams

from tests.e2e.conftest import VllmRunner
from tests.e2e.model_utils import check_outputs_equal

MODELS = [
    "deepseek-ai/DeepSeek-V2-Lite",
]

# Worker processes must be spawned rather than forked for this test.
os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"


@pytest.mark.parametrize("model", MODELS)
def test_deepseek_v2_lite_enable_shared_expert_dp_tp2(model: str) -> None:
    # Drop any inherited HCCL_OP_EXPANSION_MODE setting so the test runs
    # with the default HCCL behavior.
    if 'HCCL_OP_EXPANSION_MODE' in os.environ:
        del os.environ['HCCL_OP_EXPANSION_MODE']

    prompts = [
        "Hello, my name is", "The capital of the United States is",
        "The capital of France is", "The future of AI is"
    ]
    # Greedy decoding so the three runs below can be compared exactly.
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)

    # Baseline: eager execution, TP=2 with expert parallelism, without
    # shared expert data parallelism.
    with VllmRunner(
            model,
            max_model_len=1024,
            enforce_eager=True,
            tensor_parallel_size=2,
            enable_expert_parallel=True,
    ) as runner:
        vllm_eager_outputs = runner.model.generate(prompts, sampling_params)

    # Enable FLASHCOMM1 for the shared-expert-DP runs below.
    os.environ["VLLM_ASCEND_ENABLE_FLASHCOMM1"] = "1"

    # Shared expert DP with eager execution.
    with VllmRunner(
            model,
            max_model_len=1024,
            enforce_eager=True,
            tensor_parallel_size=2,
            enable_expert_parallel=True,
            additional_config={
                "enable_shared_expert_dp": True,
            },
    ) as runner:
        shared_expert_dp_eager_outputs = runner.model.generate(
            prompts, sampling_params)

    # Shared expert DP with graph capture (full graph for decode only).
    with VllmRunner(
            model,
            max_model_len=1024,
            tensor_parallel_size=2,
            enable_expert_parallel=True,
            compilation_config={
                "cudagraph_capture_sizes": [1, 4, 8, 16],
                "cudagraph_mode": "FULL_DECODE_ONLY",
            },
            additional_config={
                "enable_shared_expert_dp": True,
            },
    ) as runner:
        shared_expert_dp_aclgraph_outputs = runner.model.generate(
            prompts, sampling_params)

    # Flatten each set of request outputs into (index, text) pairs for
    # comparison.
    vllm_eager_outputs_list = []
    for output in vllm_eager_outputs:
        vllm_eager_outputs_list.append(
            (output.outputs[0].index, output.outputs[0].text))

    shared_expert_dp_eager_outputs_list = []
    for output in shared_expert_dp_eager_outputs:
        shared_expert_dp_eager_outputs_list.append(
            (output.outputs[0].index, output.outputs[0].text))

    shared_expert_dp_aclgraph_outputs_list = []
    for output in shared_expert_dp_aclgraph_outputs:
        shared_expert_dp_aclgraph_outputs_list.append(
            (output.outputs[0].index, output.outputs[0].text))

    # Both shared-expert-DP configurations must reproduce the baseline
    # outputs exactly.
    check_outputs_equal(
        outputs_0_lst=vllm_eager_outputs_list,
        outputs_1_lst=shared_expert_dp_eager_outputs_list,
        name_0="vllm_eager_outputs",
        name_1="shared_expert_dp_eager_outputs",
    )

    check_outputs_equal(
        outputs_0_lst=vllm_eager_outputs_list,
        outputs_1_lst=shared_expert_dp_aclgraph_outputs_list,
        name_0="vllm_eager_outputs",
        name_1="shared_expert_dp_aclgraph_outputs",
    )
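
The three flattening loops are identical in shape; a small helper (hypothetical, not part of this file) could collapse them, assuming the request outputs expose outputs[0].index and outputs[0].text exactly as used above:

# Hypothetical helper, not in the test file: flatten vLLM request outputs
# into the (index, text) pairs that check_outputs_equal expects.
def to_index_text_pairs(request_outputs):
    return [(out.outputs[0].index, out.outputs[0].text)
            for out in request_outputs]

# Usage sketch:
#     check_outputs_equal(
#         outputs_0_lst=to_index_text_pairs(vllm_eager_outputs),
#         outputs_1_lst=to_index_text_pairs(shared_expert_dp_eager_outputs),
#         name_0="vllm_eager_outputs",
#         name_1="shared_expert_dp_eager_outputs",
#     )

Since sampling uses temperature=0.0, all three runs decode greedily, which is what makes an exact (index, text) comparison meaningful here.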