Fix prefill size (#711)

This commit is contained in:
Ying Sheng
2024-07-24 03:41:15 -07:00
committed by GitHub
parent 00e4baa728
commit 4367f4bb8d
2 changed files with 5 additions and 0 deletions

View File

@@ -103,6 +103,10 @@ class ModelTpServer:
if server_args.max_running_requests is None
else server_args.max_running_requests
)
# Clamp the concurrent-request limit to the request-to-token pool capacity,
# so the scheduler can never admit more requests than the pool has slots for.
# NOTE(review): the "- 1" presumably reserves one slot (e.g. for an in-flight
# or padding entry) — confirm against req_to_token_pool's allocation logic.
self.max_running_requests = min(
    self.max_running_requests,
    self.model_runner.req_to_token_pool.size - 1
)
# Precompute a per-vocabulary logit-bias tensor from get_int_token_logit_bias
# for this tokenizer/vocab size. NOTE(review): usage is outside this view —
# presumably applied during sampling to bias integer-like tokens; verify at
# the call site before relying on this description.
self.int_token_logit_bias = torch.tensor(
    get_int_token_logit_bias(self.tokenizer, self.model_config.vocab_size)
)