Refactor E2E CI to make it clearer and faster
1. Remove some useless e2e tests
2. Remove some useless helper functions
3. Make sure every test runs with VllmRunner to avoid OOM errors (the updated test below shows the pattern)
4. Make sure every ops test ends with an empty_cache call to avoid OOM errors; see the sketch after this list
5. Run the tests one by one to avoid resource-limit errors; see the runner sketch after the test file at the end
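
A minimal sketch of the teardown pattern behind point 4, assuming the Ascend backend where the cache-clearing call is `torch.npu.empty_cache()`; the fixture name and its autouse placement are illustrative assumptions, not the repo's actual code:

```python
# Hypothetical autouse fixture illustrating point 4: free cached device
# memory after every ops test. Assumes torch_npu is installed; on CUDA
# builds the equivalent call would be torch.cuda.empty_cache().
import gc

import pytest
import torch


@pytest.fixture(autouse=True)
def cleanup_device_cache():
    yield
    # Drop Python references first so the allocator can actually release
    # the blocks, then return the cached memory to the device.
    gc.collect()
    if hasattr(torch, "npu") and torch.npu.is_available():
        torch.npu.empty_cache()
```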
- vLLM version: v0.10.1.1
- vLLM main: a344a5aa0a
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
import pytest

from tests.e2e.conftest import VllmRunner
from tests.e2e.model_utils import check_outputs_equal


@pytest.mark.parametrize("model_name", ["deepseek-ai/DeepSeek-V2-Lite-Chat"])
def test_e2e_ep_correctness(model_name):
    example_prompts = [
        "Hello, my name is",
        "The president of the United States is",
        "The capital of France is",
        "The future of AI is",
    ]
    max_tokens = 5

    # Baseline: plain tensor parallelism. VllmRunner is used as a context
    # manager so the engine is torn down and device memory is released
    # before the next engine starts.
    with VllmRunner(model_name, tensor_parallel_size=2,
                    enforce_eager=True) as vllm_model:
        tp_output = vllm_model.generate_greedy(example_prompts, max_tokens)

    # Same model with expert parallelism enabled; greedy decoding should
    # match the tensor-parallel baseline token for token.
    with VllmRunner(model_name,
                    tensor_parallel_size=2,
                    enable_expert_parallel=True,
                    enforce_eager=True) as vllm_model:
        ep_output = vllm_model.generate_greedy(example_prompts, max_tokens)

    check_outputs_equal(
        outputs_0_lst=ep_output,
        outputs_1_lst=tp_output,
        name_0="ep_output",
        name_1="tp_output",
    )
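
For point 5, a minimal sketch of what running the e2e tests one by one could look like; the driver script and the file list are illustrative assumptions, not the repository's actual CI configuration:

```python
# Hypothetical driver illustrating point 5: launch each e2e test file in
# its own pytest process so one test's device-memory footprint cannot
# push the next one over the resource limit. Paths are placeholders.
import subprocess
import sys

TEST_FILES = [
    "tests/e2e/multicard/test_expert_parallel.py",  # placeholder path
]

for test_file in TEST_FILES:
    # A fresh interpreter per file guarantees allocator state is reset.
    proc = subprocess.run([sys.executable, "-m", "pytest", "-sx", test_file])
    if proc.returncode != 0:
        sys.exit(proc.returncode)
```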