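"""End-to-end correctness test: expert parallelism (EP) vs. tensor
parallelism (TP) for DeepSeek-V2-Lite-Chat."""
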
import pytest

from tests.e2e.conftest import VllmRunner
from tests.e2e.model_utils import check_outputs_equal


@pytest.mark.parametrize("model_name", ["deepseek-ai/DeepSeek-V2-Lite-Chat"])
def test_deepseek_correctness_ep(model_name):
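    """Greedy outputs with expert parallelism enabled must match the
    tensor-parallel baseline for the same prompts."""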
    example_prompts = [
        "Hello, my name is",
        "The president of the United States is",
        "The capital of France is",
        "The future of AI is",
    ]
    max_tokens = 5

    # FIXME: It is strange that chunked prefill can lead to different
    # results here; investigate further.
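    # Baseline: tensor parallelism only.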
    with VllmRunner(model_name, tensor_parallel_size=2) as vllm_model:
        tp_output = vllm_model.generate_greedy(example_prompts, max_tokens)

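    # Same model and world size, with expert parallelism enabled.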
    with VllmRunner(model_name,
                    tensor_parallel_size=2,
                    enable_expert_parallel=True) as vllm_model:
        ep_output = vllm_model.generate_greedy(example_prompts, max_tokens)
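
    # The comparison is exact: greedy decoding should be deterministic, so
    # the EP and TP runs must produce identical tokens.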
    check_outputs_equal(
        outputs_0_lst=ep_output,
        outputs_1_lst=tp_output,
        name_0="ep_output",
        name_1="tp_output",
    )