Fix flashinfer (#430)
@@ -113,7 +113,8 @@ class ModelRpcServer:
             f"max_prefill_num_token={self.max_prefill_num_token}, "
             f"context_len={self.model_config.context_len}, "
         )
-        logger.info(f"server_args: {server_args.print_mode_args()}")
+        if self.tp_rank == 0:
+            logger.info(f"server_args: {server_args.print_mode_args()}")

         # Init cache
         self.tree_cache = RadixCache(disable=server_args.disable_radix_cache)
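The first hunk makes only tensor-parallel rank 0 print the server args; previously every rank emitted an identical log line. A minimal sketch of the pattern (the constructor shape and logger setup here are assumptions for illustration, not the actual class):

import logging

logger = logging.getLogger(__name__)

class ModelRpcServer:
    def __init__(self, tp_rank, server_args):
        self.tp_rank = tp_rank
        # With N tensor-parallel ranks, an unguarded logger.info would print
        # the same server_args line N times; only rank 0 logs it now.
        if self.tp_rank == 0:
            logger.info(f"server_args: {server_args.print_mode_args()}")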
@@ -110,12 +110,12 @@ class InputMetadata:
         self.kv_last_page_len = torch.ones(
             (self.batch_size,), dtype=torch.int32, device="cuda"
         )
-        req_pool_indices_cpu = self.req_pool_indices.cpu().numpy()
-        seq_lens_cpu = self.seq_lens.cpu().numpy()
+        req_pool_indices_cpu = self.req_pool_indices.cpu().tolist()
+        seq_lens_cpu = self.seq_lens.tolist()
         self.kv_indices = torch.cat(
             [
                 self.req_to_token_pool.req_to_token[
-                    req_pool_indices_cpu[i]: seq_lens_cpu[i]
+                    req_pool_indices_cpu[i], : seq_lens_cpu[i]
                 ]
                 for i in range(self.batch_size)
             ],
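The second hunk fixes how kv_indices is gathered for flashinfer. req_to_token is a 2-D table whose row r lists the KV-cache locations of request slot r, so the old expression req_to_token[req_pool_indices_cpu[i]: seq_lens_cpu[i]] sliced a range of *rows* (pulling in other requests' slots) instead of taking one row and its first seq_len positions. The .numpy() → .tolist() change also yields plain Python ints, and Tensor.tolist() copies to CPU implicitly, which is why the explicit .cpu() on seq_lens could be dropped. A toy reproduction of the fixed indexing (shapes and values are invented for illustration):

import torch

# Stand-in for req_to_token_pool.req_to_token: 4 request slots x 8 positions.
req_to_token = torch.arange(4 * 8, dtype=torch.int32).reshape(4, 8)
req_pool_indices_cpu = [2, 0]  # slots occupied by this batch
seq_lens_cpu = [5, 3]          # current token count of each request

# Buggy: req_to_token[2:5] selects rows 2..4, i.e. other requests' slots.
# Fixed: row 2, first 5 columns -- exactly the first request's tokens.
kv_indices = torch.cat(
    [
        req_to_token[req_pool_indices_cpu[i], : seq_lens_cpu[i]]
        for i in range(len(req_pool_indices_cpu))
    ],
    dim=0,
)
print(kv_indices.shape)  # torch.Size([8]): 5 + 3 flattened KV-cache indices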