Fixed error handling in bench_latency.py (#904)

This commit is contained in:
min-xu-et
2024-08-03 17:42:17 -07:00
committed by GitHub
parent 947402c829
commit 7dd8a7e6d9

View File

@@ -380,13 +380,15 @@ class Batch:
extend_num_tokens = seq_lens.sum() - prefix_lens.sum()
out_cache_loc = self.token_to_kv_pool.alloc(extend_num_tokens)
if out_cache_loc is None:
self.tree_cache.evict(extend_num_tokens, self.token_to_kv_pool.free)
out_cache_loc = self.token_to_kv_pool.alloc(extend_num_tokens)
if self.tree_cache is not None:
self.tree_cache.evict(extend_num_tokens, self.token_to_kv_pool.free)
out_cache_loc = self.token_to_kv_pool.alloc(extend_num_tokens)
if out_cache_loc is None:
logger.error("Prefill out of memory. This should never happen.")
self.tree_cache.pretty_print()
exit()
logger.error("Prefill out of memory. Try to lower your batch size.")
if self.tree_cache is not None:
self.tree_cache.pretty_print()
exit(1)
pt = 0
for i in range(bs):
@@ -637,9 +639,10 @@ class Batch:
self.out_cache_loc = self.token_to_kv_pool.alloc(bs)
if self.out_cache_loc is None:
logger.error("Decode out of memory. This should never happen.")
self.tree_cache.pretty_print()
exit()
logger.error("Decode out of memory. Try to lower your batch size.")
if self.tree_cache is not None:
self.tree_cache.pretty_print()
exit(1)
self.req_to_token_pool.req_to_token[
self.req_pool_indices, self.seq_lens - 1