diff --git a/.github/workflows/_e2e_test.yaml b/.github/workflows/_e2e_test.yaml
index 17fb04ff..96ae9578 100644
--- a/.github/workflows/_e2e_test.yaml
+++ b/.github/workflows/_e2e_test.yaml
@@ -222,6 +222,7 @@ jobs:
           VLLM_WORKER_MULTIPROC_METHOD: spawn
         if: ${{ inputs.type == 'full' }}
         run: |
+          pytest -sv --durations=0 tests/e2e/multicard/2-cards/test_qwen3_performance.py
           pytest -sv --durations=0 tests/e2e/multicard/2-cards/test_data_parallel.py
           pytest -sv --durations=0 tests/e2e/multicard/2-cards/test_expert_parallel.py
           pytest -sv --durations=0 tests/e2e/multicard/2-cards/test_external_launcher.py
diff --git a/.github/workflows/nightly_test_a2.yaml b/.github/workflows/nightly_test_a2.yaml
index 736fcb27..b772b300 100644
--- a/.github/workflows/nightly_test_a2.yaml
+++ b/.github/workflows/nightly_test_a2.yaml
@@ -49,9 +49,6 @@ jobs:
       fail-fast: false
       matrix:
         test_config:
-          - name: qwen3-8b
-            os: linux-aarch64-a2-1
-            tests: tests/e2e/nightly/single_node/models/test_qwen3_8b.py
           - name: qwen3next
             os: linux-aarch64-a2-4
             tests: tests/e2e/nightly/single_node/models/test_qwen3_next.py
diff --git a/tests/e2e/nightly/single_node/models/test_qwen3_8b.py b/tests/e2e/multicard/2-cards/test_qwen3_performance.py
similarity index 95%
rename from tests/e2e/nightly/single_node/models/test_qwen3_8b.py
rename to tests/e2e/multicard/2-cards/test_qwen3_performance.py
index 0f0ae383..e8a6e51e 100644
--- a/tests/e2e/nightly/single_node/models/test_qwen3_8b.py
+++ b/tests/e2e/multicard/2-cards/test_qwen3_performance.py
@@ -37,14 +37,14 @@ api_keyword_args = {

 vllm_bench_cases = {
     "dataset-name": "random",
-    "num_prompts": 1000,
+    "num_prompts": 500,
     "request_rate": 20,
     "random_input_len": 128,
     "max_concurrency": 40,
     "random_output_len": 100,
 }

-baseline_throughput = 1622.08  # baseline throughput for Qwen3-8B
+baseline_throughput = 1600.0  # baseline throughput for Qwen3-8B, measured with num_prompts=500


 @pytest.mark.parametrize("model", MODELS)