Update CI threshold (#2186)

This commit is contained in:
Lianmin Zheng
2024-11-25 15:24:17 -08:00
committed by GitHub
parent 10189d08dd
commit 3c5538f781
2 changed files with 6 additions and 2 deletions

View File

@@ -65,7 +65,9 @@ def patch_model(
_to_torch(model) _to_torch(model)
monkey_patch_vllm_all_gather() monkey_patch_vllm_all_gather()
backup_ca_comm = tp_group.ca_comm backup_ca_comm = tp_group.ca_comm
# Use custom-allreduce here # Use custom-allreduce here.
# We found the custom allreduce is much faster than the built-in allreduce in torch,
# even with ENABLE_INTRA_NODE_COMM=1.
# tp_group.ca_comm = None # tp_group.ca_comm = None
yield torch.compile( yield torch.compile(
torch.no_grad()(model.forward), mode="max-autotune-no-cudagraphs" torch.no_grad()(model.forward), mode="max-autotune-no-cudagraphs"

View File

@@ -36,7 +36,9 @@ class TestBenchServing(unittest.TestCase):
) )
if is_in_ci(): if is_in_ci():
self.assertGreater(res["output_throughput"], 950) # There is a regression with torch 2.5
# This number was 950 for torch 2.4
self.assertGreater(res["output_throughput"], 800)
def test_offline_throughput_without_radix_cache(self): def test_offline_throughput_without_radix_cache(self):
res = run_bench_serving( res = run_bench_serving(