Refactor e2e CI (#2276)
Refactor E2E CI to make it clearer and faster
1. Remove some useless e2e tests
2. Remove some useless functions
3. Make sure all tests run with VLLMRunner to avoid OOM errors
4. Make sure all ops tests end with torch.empty_cache to avoid OOM errors
5. Run the tests one by one to avoid resource limit errors
- vLLM version: v0.10.1.1
- vLLM main:
a344a5aa0a
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
50
.github/workflows/vllm_ascend_test.yaml
vendored
50
.github/workflows/vllm_ascend_test.yaml
vendored
@@ -191,27 +191,29 @@ jobs:
|
||||
VLLM_WORKER_MULTIPROC_METHOD: spawn
|
||||
VLLM_USE_MODELSCOPE: True
|
||||
run: |
|
||||
pytest -sv tests/e2e/singlecard/test_offline_inference.py
|
||||
# pytest -sv tests/e2e/singlecard/test_ilama_lora.py
|
||||
pytest -sv tests/e2e/singlecard/test_guided_decoding.py
|
||||
# We found that if running aclgraph tests in batch, it will cause AclmdlRICaptureBegin error. So we run
|
||||
# the test separately.
|
||||
|
||||
pytest -sv tests/e2e/singlecard/test_aclgraph.py
|
||||
pytest -sv tests/e2e/singlecard/test_ascend_scheduler.py
|
||||
pytest -sv tests/e2e/singlecard/test_camem.py
|
||||
pytest -sv tests/e2e/singlecard/test_chunked.py
|
||||
pytest -sv tests/e2e/singlecard/test_embedding.py
|
||||
pytest -sv tests/e2e/singlecard/test_guided_decoding.py
|
||||
# TODO: Fix lora accuracy error
|
||||
# pytest -sv tests/e2e/singlecard/test_ilama_lora.py
|
||||
pytest -sv tests/e2e/singlecard/test_profile_execute_duration.py
|
||||
pytest -sv tests/e2e/singlecard/test_quantization.py
|
||||
pytest -sv tests/e2e/singlecard/test_sampler.py
|
||||
pytest -sv tests/e2e/singlecard/test_vlm.py
|
||||
|
||||
# ------------------------------------ v1 spec decode test ------------------------------------ #
|
||||
pytest -sv tests/e2e/singlecard/spec_decode_v1/test_v1_mtp_correctness.py
|
||||
# TODO: revert me when test_v1_spec_decode.py::test_ngram_correctness is fixed
|
||||
pytest -sv tests/e2e/singlecard/spec_decode_v1/test_v1_spec_decode.py
|
||||
|
||||
# All other tests, ignore: 310p test, accuracy test.
|
||||
pytest -sv tests/e2e/singlecard/ \
|
||||
--ignore=tests/e2e/singlecard/test_offline_inference.py \
|
||||
--ignore=tests/e2e/singlecard/test_ilama_lora.py \
|
||||
--ignore=tests/e2e/singlecard/test_guided_decoding.py \
|
||||
--ignore=tests/e2e/singlecard/test_camem.py \
|
||||
--ignore=tests/e2e/singlecard/test_embedding.py \
|
||||
--ignore=tests/e2e/singlecard/spec_decode_v1/test_v1_mtp_correctness.py \
|
||||
--ignore=tests/e2e/singlecard/spec_decode_v1/test_v1_spec_decode.py \
|
||||
--ignore=tests/e2e/singlecard/test_offline_inference_310p.py
|
||||
pytest -sv tests/e2e/singlecard/ops/
|
||||
|
||||
e2e-2-cards:
|
||||
needs: [e2e]
|
||||
if: ${{ needs.e2e.result == 'success' }}
|
||||
@@ -273,17 +275,23 @@ jobs:
|
||||
VLLM_WORKER_MULTIPROC_METHOD: spawn
|
||||
VLLM_USE_MODELSCOPE: True
|
||||
run: |
|
||||
pytest -sv tests/e2e/multicard/test_data_parallel.py
|
||||
pytest -sv tests/e2e/multicard/test_expert_parallel.py
|
||||
# external_launcher test is not stable enough. Fix it later
|
||||
# pytest -sv tests/e2e/multicard/test_external_launcher.py
|
||||
pytest -sv tests/e2e/multicard/test_fused_moe_allgather_ep.py
|
||||
# pytest -sv tests/e2e/multicard/test_ilama_lora_tp2.py
|
||||
# Fixme: run VLLM_USE_MODELSCOPE=True pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py will raise error.
|
||||
|
||||
# To avoid oom, we need to run the test in a single process.
|
||||
pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_multistream_moe
|
||||
pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_QwQ
|
||||
pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeekV3_dbo
|
||||
pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_multistream_moe
|
||||
#pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_pangu
|
||||
#pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_Qwen3_W8A8
|
||||
pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_Qwen3_W4A8DYNAMIC
|
||||
pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_W4A8DYNAMIC
|
||||
pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_sp_for_qwen3_moe
|
||||
pytest -sv tests/e2e/multicard/test_data_parallel.py
|
||||
pytest -sv tests/e2e/multicard/ --ignore=tests/e2e/multicard/test_ilama_lora_tp2.py \
|
||||
--ignore=tests/e2e/multicard/test_offline_inference_distributed.py \
|
||||
--ignore=tests/e2e/multicard/test_data_parallel.py \
|
||||
--ignore=tests/e2e/multicard/test_offline_inference_310p.py
|
||||
|
||||
#pytest -sv tests/e2e/multicard/test_pipeline_parallel.py
|
||||
#pytest -sv tests/e2e/multicard/test_prefix_caching.py
|
||||
#pytest -sv tests/e2e/multicard/test_qwen3_moe.py
|
||||
#pytest -sv tests/e2e/multicard/test_torchair_graph_mode.py
|
||||
|
||||
6
.github/workflows/vllm_ascend_test_310p.yaml
vendored
6
.github/workflows/vllm_ascend_test_310p.yaml
vendored
@@ -111,7 +111,7 @@ jobs:
|
||||
PYTORCH_NPU_ALLOC_CONF: max_split_size_mb:256
|
||||
run: |
|
||||
if [[ "${{ matrix.os }}" == "linux-aarch64-310p-1" ]]; then
|
||||
pytest -sv tests/e2e/singlecard/test_offline_inference_310p.py
|
||||
pytest -sv tests/e2e/310p/test_offline_inference_310p.py
|
||||
else
|
||||
pytest -sv tests/e2e/multicard/test_offline_inference_310p.py
|
||||
fi
|
||||
pytest -sv tests/e2e/310p/test_offline_inference_parallel_310p.py
|
||||
fi
|
||||
|
||||
Reference in New Issue
Block a user