import os

import pytest

from vllm import SamplingParams

from tests.e2e.conftest import VllmRunner
from tests.e2e.model_utils import check_outputs_equal

MODELS = [
    "deepseek-ai/DeepSeek-V2-Lite",
]


@pytest.mark.parametrize("model", MODELS)
def test_deepseek_v2_lite_enable_shared_expert_dp_tp2(model: str) -> None:
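    # Make sure HCCL_OP_EXPANSION_MODE is unset so the runs below fall back to
    # HCCL's default op-expansion behaviour.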
    if "HCCL_OP_EXPANSION_MODE" in os.environ:
        del os.environ["HCCL_OP_EXPANSION_MODE"]

    prompts = [
        "Hello, my name is",
        "The capital of the United States is",
        "The capital of France is",
        "The future of AI is",
    ]
    sampling_params = SamplingParams(max_tokens=32, temperature=0.0)
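
    # Baseline: eager-mode TP=2 run with expert parallelism enabled but
    # without shared-expert data parallelism.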
    with VllmRunner(
            model,
            max_model_len=1024,
            enforce_eager=True,
            tensor_parallel_size=2,
            enable_expert_parallel=True,
    ) as runner:
        vllm_eager_outputs = runner.model.generate(prompts, sampling_params)
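
    # The two shared-expert DP runs below execute with
    # VLLM_ASCEND_ENABLE_FLASHCOMM1 set; the variable stays set for the rest
    # of this test process.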
    os.environ["VLLM_ASCEND_ENABLE_FLASHCOMM1"] = "1"
    with VllmRunner(
            model,
            max_model_len=1024,
            enforce_eager=True,
            tensor_parallel_size=2,
            enable_expert_parallel=True,
            additional_config={
                "enable_shared_expert_dp": True,
            },
    ) as runner:
        shared_expert_dp_eager_outputs = runner.model.generate(
            prompts, sampling_params)
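
    # Same shared-expert DP configuration, this time with graph capture for
    # decode (cudagraph_mode FULL_DECODE_ONLY) instead of eager execution.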
    with VllmRunner(
            model,
            max_model_len=1024,
            tensor_parallel_size=2,
            enable_expert_parallel=True,
            compilation_config={
                "cudagraph_capture_sizes": [1, 4, 8, 16],
                "cudagraph_mode": "FULL_DECODE_ONLY",
            },
            additional_config={
                "enable_shared_expert_dp": True,
            },
    ) as runner:
        shared_expert_dp_aclgraph_outputs = runner.model.generate(
            prompts, sampling_params)
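
    # Flatten each result set into (index, text) pairs so the completions can
    # be compared position by position.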
    vllm_eager_outputs_list = []
    for output in vllm_eager_outputs:
        vllm_eager_outputs_list.append(
            (output.outputs[0].index, output.outputs[0].text))

    shared_expert_dp_eager_outputs_list = []
    for output in shared_expert_dp_eager_outputs:
        shared_expert_dp_eager_outputs_list.append(
            (output.outputs[0].index, output.outputs[0].text))

    shared_expert_dp_aclgraph_outputs_list = []
    for output in shared_expert_dp_aclgraph_outputs:
        shared_expert_dp_aclgraph_outputs_list.append(
            (output.outputs[0].index, output.outputs[0].text))
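
    # With greedy sampling (temperature=0.0), all three configurations are
    # expected to produce identical completions.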
    check_outputs_equal(
        outputs_0_lst=vllm_eager_outputs_list,
        outputs_1_lst=shared_expert_dp_eager_outputs_list,
        name_0="vllm_eager_outputs",
        name_1="shared_expert_dp_eager_outputs",
    )

    check_outputs_equal(
        outputs_0_lst=vllm_eager_outputs_list,
        outputs_1_lst=shared_expert_dp_aclgraph_outputs_list,
        name_0="vllm_eager_outputs",
        name_1="shared_expert_dp_aclgraph_outputs",
    )