[Bugfix] Fix embedding model hangs with --enable-metrics (#2822)

This commit is contained in:
Chang Su
2025-01-10 13:14:51 -08:00
committed by GitHub
parent 8f15789314
commit f290bd4332
4 changed files with 49 additions and 4 deletions

View File

@@ -128,7 +128,7 @@ class ModelConfig:
self.num_hidden_layers = self.hf_text_config.num_hidden_layers
self.vocab_size = self.hf_text_config.vocab_size
-# Veirfy quantization
+# Verify quantization
self._verify_quantization()
# Cache attributes

View File

@@ -688,7 +688,7 @@ class TokenizerManager:
if self.enable_metrics:
completion_tokens = (
recv_obj.completion_tokens[i]
-if recv_obj.completion_tokens
+if getattr(recv_obj, "completion_tokens", None)
else 0
)
@@ -716,7 +716,11 @@ class TokenizerManager:
time.time() - state.created_time
)
# Compute time_per_output_token for the non-streaming case
-if not state.obj.stream and completion_tokens >= 1:
+if (
+    hasattr(state.obj, "stream")
+    and not state.obj.stream
+    and completion_tokens >= 1
+):
self.metrics_collector.observe_time_per_output_token(
(time.time() - state.created_time)
/ completion_tokens

View File

@@ -724,7 +724,7 @@ class ModelRunner:
elif forward_batch.forward_mode.is_idle():
return self.forward_idle(forward_batch)
else:
-raise ValueError(f"Invaid forward mode: {forward_batch.forward_mode}")
+raise ValueError(f"Invalid forward mode: {forward_batch.forward_mode}")
def sample(
self, logits_output: LogitsProcessorOutput, forward_batch: ForwardBatch