Test no vllm custom allreduce (#4210)

This commit is contained in:
Lianmin Zheng
2025-03-08 05:23:06 -08:00
committed by GitHub
parent 4a893d142d
commit 2cadd51d11
2 changed files with 6 additions and 2 deletions

View File

@@ -269,6 +269,8 @@ jobs:
           cd test/srt
           python3 -m unittest test_bench_one_batch.TestBenchOneBatch.test_moe_tp2_bs1
+          USE_VLLM_CUSTOM_ALLREDUCE=0 python3 -m unittest test_bench_one_batch.TestBenchOneBatch.test_moe_tp2_bs1
       - name: Benchmark single latency + torch.compile (TP=2)
         timeout-minutes: 10
         run: |

View File

@@ -11,7 +11,9 @@ from sglang.test.test_utils import (
 class TestBenchOneBatch(unittest.TestCase):
     def test_bs1(self):
-        output_throughput = run_bench_one_batch(DEFAULT_MODEL_NAME_FOR_TEST, [])
+        output_throughput = run_bench_one_batch(
+            DEFAULT_MODEL_NAME_FOR_TEST, ["--cuda-graph-max-bs", "2"]
+        )
         if is_in_ci():
             write_github_step_summary(
@@ -22,7 +24,7 @@ class TestBenchOneBatch(unittest.TestCase):
     def test_moe_tp2_bs1(self):
         output_throughput = run_bench_one_batch(
-            DEFAULT_MOE_MODEL_NAME_FOR_TEST, ["--tp", "2"]
+            DEFAULT_MOE_MODEL_NAME_FOR_TEST, ["--tp", "2", "--cuda-graph-max-bs", "2"]
         )
         if is_in_ci():