From 1c304aa9bce6c90943115080e27fb037e08f96a3 Mon Sep 17 00:00:00 2001
From: Nicolas Castet <26874160+nvcastet@users.noreply.github.com>
Date: Thu, 23 Oct 2025 14:28:03 -0500
Subject: [PATCH] Log iteration # for prefill and decode (#9366)

---
 python/sglang/srt/managers/scheduler_metrics_mixin.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/sglang/srt/managers/scheduler_metrics_mixin.py b/python/sglang/srt/managers/scheduler_metrics_mixin.py
index aa2ae130d..c05591a7e 100644
--- a/python/sglang/srt/managers/scheduler_metrics_mixin.py
+++ b/python/sglang/srt/managers/scheduler_metrics_mixin.py
@@ -123,7 +123,7 @@ class SchedulerMetricsMixin:
             token_usage_msg = f"token usage: {token_usage:.2f}, "

         f = (
-            f"Prefill batch. "
+            f"Prefill batch [{self.forward_ct + 1}], "
             f"#new-seq: {len(can_run_list)}, "
             f"#new-token: {adder.log_input_tokens}, "
             f"#cached-token: {adder.log_hit_tokens}, "
@@ -246,7 +246,7 @@ class SchedulerMetricsMixin:
                 gap_latency / self.server_args.decode_log_interval
             )

-        msg = f"Decode batch. #running-req: {num_running_reqs}, {token_usage_msg}"
+        msg = f"Decode batch [{self.forward_ct}], #running-req: {num_running_reqs}, {token_usage_msg}"

         if self.spec_algorithm.is_none():
             spec_accept_length = 0