Fix the race condition in overlap mode (#1712)
With overlap scheduling enabled, result processing for batch N runs while the forward pass of batch N+1 is already in flight, and the two phases shared mutable state: the live batch objects, their tensors, and the future-token-id bookkeeping. This commit removes that sharing: the fields read during result processing are snapshotted or cloned, the finish path frees the KV slot that was speculatively allocated for an already-finished request, and the future-token-id allocation is issued on the forward stream.
```diff
@@ -405,9 +405,9 @@ class ScheduleBatch:
 
     # Request, memory pool, and cache
     reqs: List[Req]
-    req_to_token_pool: ReqToTokenPool
-    token_to_kv_pool: BaseTokenToKVPool
-    tree_cache: BasePrefixCache
+    req_to_token_pool: ReqToTokenPool = None
+    token_to_kv_pool: BaseTokenToKVPool = None
+    tree_cache: BasePrefixCache = None
 
     forward_mode: ForwardMode = None
     sampling_info: SamplingBatchInfo = None
```
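Making the pool and cache handles optional is what allows `copy()` (next hunk) to drop them from the snapshot. A minimal sketch of the pattern, with illustrative names rather than sglang's real classes:

```python
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class Batch:
    reqs: List[str]
    # Shared, stateful handles: default to None so that a lightweight
    # snapshot can be constructed without re-attaching them.
    token_to_kv_pool: Optional[object] = None
    tree_cache: Optional[object] = None


# A snapshot built for result processing can now omit the pools entirely.
snap = Batch(reqs=["r0", "r1"])
assert snap.token_to_kv_pool is None
```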
```diff
@@ -874,12 +874,9 @@ class ScheduleBatch:
     def copy(self):
         return ScheduleBatch(
             reqs=self.reqs,
-            req_to_token_pool=self.req_to_token_pool,
-            token_to_kv_pool=self.token_to_kv_pool,
-            tree_cache=self.tree_cache,
             forward_mode=self.forward_mode,
             output_ids=self.output_ids,
             sampling_info=self.sampling_info,
             out_cache_loc=self.out_cache_loc,
             return_logprob=self.return_logprob,
             decoding_reqs=self.decoding_reqs,
         )
```
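The object returned by `copy()` is what result processing works on while the next step is already mutating the live batch. A hedged sketch of why a shallow snapshot suffices for fields the scheduler *rebinds* (tensors it mutates in place still need `.clone()`, as the next hunk shows); `LiveBatch` is an illustrative stand-in, not sglang's class:

```python
from dataclasses import dataclass
from typing import List


@dataclass
class LiveBatch:
    reqs: List[str]
    out_cache_loc: List[int]

    def copy(self) -> "LiveBatch":
        # Snapshot only the fields result processing will read later.
        return LiveBatch(reqs=self.reqs, out_cache_loc=self.out_cache_loc)


live = LiveBatch(reqs=["a"], out_cache_loc=[3])
snap = live.copy()
live.out_cache_loc = [7]          # the next step rebinds the live field
assert snap.out_cache_loc == [3]  # the snapshot still sees the old value
```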
```diff
@@ -929,7 +926,7 @@ class ModelWorkerBatch:
             forward_mode=self.forward_mode,
             input_ids=self.input_ids.clone(),
             req_pool_indices=self.req_pool_indices,
-            seq_lens=self.seq_lens,
+            seq_lens=self.seq_lens.clone(),
             out_cache_loc=self.out_cache_loc,
             return_logprob=self.return_logprob,
             top_logprobs_nums=self.top_logprobs_nums,
```
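`input_ids` was already cloned; `seq_lens` still aliased the live tensor, which is presumably advanced in place when the next decode step is prepared. A standalone illustration of the aliasing this `.clone()` prevents:

```python
import torch

seq_lens = torch.tensor([5, 9])

alias = seq_lens         # no clone: shares storage with the live tensor
snap = seq_lens.clone()  # clone: private storage

seq_lens.add_(1)         # the next decode step grows lengths in place

print(alias)  # tensor([ 6, 10])  <- races with the in-place update
print(snap)   # tensor([5, 9])    <- stable snapshot
```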
```diff
@@ -261,12 +261,7 @@ class Scheduler:
             self.resolve_next_token_ids = (
                 lambda bid, x: self.tp_worker.resolve_future_token_ids(bid)
             )
-
-            def cache_finished_req(req):
-                free_delta = int(self.running_batch and req in self.cur_batch.reqs)
-                self.tree_cache.cache_finished_req(req, free_delta=free_delta)
-
-            self.cache_finished_req = cache_finished_req
+            self.cache_finished_req = self.tree_cache.cache_finished_req
         else:
             self.forward_batch_generation = self.tp_worker.forward_batch_generation
             self.resolve_next_token_ids = lambda bid, x: x.tolist()
```
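The `free_delta` wrapper compensated for the stray KV slot of a finished request; it is no longer needed because that slot is now freed directly in the finish check (two hunks below). The surrounding pattern, binding mode-specific callables once at init time instead of branching on every call, in a sketch with simplified stand-ins:

```python
import torch


class Worker:
    def resolve_future_token_ids(self, bid: int) -> list:
        return [101, 102]  # stand-in: look up the resolved ids for batch `bid`


class Scheduler:
    def __init__(self, worker: Worker, enable_overlap: bool) -> None:
        if enable_overlap:
            # Overlap mode: the tensor holds placeholders; resolve by batch id.
            self.resolve_next_token_ids = (
                lambda bid, x: worker.resolve_future_token_ids(bid)
            )
        else:
            # Normal mode: the tensor already holds the real token ids.
            self.resolve_next_token_ids = lambda bid, x: x.tolist()


s = Scheduler(Worker(), enable_overlap=False)
print(s.resolve_next_token_ids(0, torch.tensor([7, 8])))  # [7, 8]
```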
```diff
@@ -798,7 +793,6 @@ class Scheduler:
                         i, req, logprob_pt, next_token_ids, logits_output
                     )
         else:  # embedding or reward model
-            assert batch.extend_num_tokens != 0
             embeddings, bid = result
             embeddings = embeddings.tolist()
 
```
```diff
@@ -838,6 +832,7 @@ class Scheduler:
         # Check finish condition
         for i, (req, next_token_id) in enumerate(zip(batch.reqs, next_token_ids)):
             if self.server_args.enable_overlap_schedule and req.finished():
+                self.token_to_kv_pool.free(batch.out_cache_loc[i : i + 1])
                 continue
 
             req.completion_tokens_wo_jump_forward += 1
```
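In overlap mode, a decode step may already have been launched, and a KV slot allocated, for a request that finished in the previous step; skipping the request without freeing that slot would leak it. A toy free-list pool (a hypothetical `KVPool`, not sglang's allocator) showing the accounting:

```python
class KVPool:
    def __init__(self, size: int):
        self.free_slots = list(range(size))

    def alloc(self) -> int:
        return self.free_slots.pop()

    def free(self, slot: int) -> None:
        self.free_slots.append(slot)


pool = KVPool(4)
out_cache_loc = [pool.alloc(), pool.alloc()]  # slots for this decode step
finished = [True, False]                      # req 0 finished last step

for i, done in enumerate(finished):
    if done:
        pool.free(out_cache_loc[i])  # reclaim the speculatively used slot
        continue
    # ... normal token processing for still-running requests ...

assert len(pool.free_slots) == 3  # no leak: 1 of 4 slots stays in use
```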
```diff
@@ -149,14 +149,12 @@ class TpModelWorker:
         )
 
         # Resolve future tokens in the input
-        # logger.info(f"raw input {model_worker_batch.input_ids=}")
         tic2 = time.time()
         resolved_input_ids = model_worker_batch.input_ids
         future_mask = resolved_input_ids < 0
         resolved_input_ids[future_mask] = self.future_token_ids_map[
             -resolved_input_ids[future_mask]
         ]
-        # logger.info(f"resolved input {model_worker_batch.input_ids=}")
 
         # Run forward
         logits_output, next_token_ids = self.forward_batch_generation(
```
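Placeholder token ids are negative: `-k` means "slot `k` of `future_token_ids_map`", which the producing step fills in once the real ids are sampled. A self-contained sketch of the resolution step (sizes are illustrative; the `.long()` cast is added here for portable tensor indexing):

```python
import torch

# Slots 1..2 have been filled in by the producing step.
future_token_ids_map = torch.zeros(16, dtype=torch.int32)
future_token_ids_map[1:3] = torch.tensor([42, 7], dtype=torch.int32)

input_ids = torch.tensor([5, -1, -2, 9], dtype=torch.int32)

# Resolve placeholders: id -k is replaced by map[k].
future_mask = input_ids < 0
input_ids[future_mask] = future_token_ids_map[(-input_ids[future_mask]).long()]

print(input_ids)  # tensor([ 5, 42,  7,  9], dtype=torch.int32)
```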
```diff
@@ -215,12 +213,13 @@ class TpModelWorker:
         self.future_logits_output_ct += 1
 
         bs = len(model_worker_batch.seq_lens)
-        future_next_token_ids = -torch.arange(
-            self.future_token_ids_ct + 1,
-            self.future_token_ids_ct + 1 + bs,
-            dtype=torch.int32,
-            device=self.device,
-        )
+        with torch.cuda.stream(self.forward_stream):
+            future_next_token_ids = -torch.arange(
+                self.future_token_ids_ct + 1,
+                self.future_token_ids_ct + 1 + bs,
+                dtype=torch.int32,
+                device=self.device,
+            )
         self.future_token_ids_ct = (
             self.future_token_ids_ct + bs
         ) % self.future_token_ids_limit
```
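This is the heart of the fix as far as the diff shows: the placeholder allocation used to be issued on the default stream while generation runs on `forward_stream`, and work queued on different CUDA streams is not ordered with respect to each other, so consumers of `future_next_token_ids` could race with the forward pass. Issuing the allocation inside `torch.cuda.stream(self.forward_stream)` serializes it with that work. A minimal stream-ordering sketch, guarded on CUDA availability:

```python
import torch

if torch.cuda.is_available():
    forward_stream = torch.cuda.Stream()
    ct, bs = 0, 4

    with torch.cuda.stream(forward_stream):
        # Queued on forward_stream: ordered after all prior work on that
        # stream, unlike an allocation issued on the default stream.
        future_next_token_ids = -torch.arange(
            ct + 1, ct + 1 + bs, dtype=torch.int32, device="cuda"
        )

    forward_stream.synchronize()
    print(future_next_token_ids)  # tensor([-1, -2, -3, -4], device='cuda:0')
```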