Enable overlap by default (#2067)
@@ -157,14 +157,19 @@ class TpModelWorkerClient:
     def forward_batch_generation(self, model_worker_batch: ModelWorkerBatch):
         # A cuda stream sync here to avoid the cuda illegal memory access error.
         _ = model_worker_batch.seq_lens[0].item()
         torch.cuda.current_stream().synchronize()
 
+        # Create a new copy of sampling_info because it will be updated in-place by the scheduler for the next batch.
+        sampling_info = model_worker_batch.sampling_info
+        sampling_info.update_penalties()
+        model_worker_batch.sampling_info = self.cur_sampling_info = dataclasses.replace(
+            sampling_info,
+            sampling_info_done=threading.Event(),
+            scaling_penalties=sampling_info.scaling_penalties,
+            linear_penalties=sampling_info.linear_penalties,
+        )
+
         # Push a new batch to the queue
-        model_worker_batch.sampling_info = dataclasses.replace(
-            model_worker_batch.sampling_info,
-            sampling_info_done=threading.Event(),
-        )
-        self.cur_sampling_info = model_worker_batch.sampling_info
         self.input_queue.put((model_worker_batch, self.future_token_ids_ct))
 
         # Allocate output future objects
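The heart of the change is snapshotting the mutable sampling state before the batch is handed to the background worker. Below is a minimal, self-contained sketch of that pattern; the names (SamplingInfo, schedule, worker_loop) are hypothetical stand-ins, not the sglang API. dataclasses.replace() gives the in-flight batch its own container and its own completion Event, so the scheduler can keep mutating the original in place for the next batch without racing the worker thread.

import dataclasses
import queue
import threading


@dataclasses.dataclass
class SamplingInfo:
    # Hypothetical stand-in for sglang's per-batch sampling metadata.
    temperature: float
    scaling_penalties: object = None
    linear_penalties: object = None
    sampling_info_done: threading.Event = dataclasses.field(
        default_factory=threading.Event
    )


input_queue = queue.Queue()


def schedule(batch_info: SamplingInfo) -> SamplingInfo:
    # Snapshot before enqueueing: the worker gets its own dataclass instance
    # and its own Event, while the scheduler keeps updating `batch_info`
    # in place for the next batch.
    snapshot = dataclasses.replace(batch_info, sampling_info_done=threading.Event())
    input_queue.put(snapshot)
    return snapshot


def worker_loop():
    info = input_queue.get()
    # ... run the forward pass with `info` ...
    info.sampling_info_done.set()  # signal that this batch's metadata is final


snapshot = schedule(SamplingInfo(temperature=0.7))
threading.Thread(target=worker_loop, daemon=True).start()
snapshot.sampling_info_done.wait()  # original batch_info is now safe to reuse

Note that dataclasses.replace() performs a shallow copy: field values such as the penalty tensors are shared by reference, so the snapshot is cheap; only the dataclass instance and the Event are fresh, which presumably is why the diff threads the freshly recomputed scaling_penalties and linear_penalties into the copy explicitly.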
Reference in New Issue
Block a user
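The context lines also rely on a common PyTorch idiom for forcing a device sync: reading one element with .item() triggers a device-to-host copy that waits for the work already queued on the current stream, and synchronize() then acts as an explicit barrier. A small illustration, assuming a CUDA-capable PyTorch install:

import torch

if torch.cuda.is_available():
    seq_lens = torch.randint(1, 4096, (8,), device="cuda")
    # .item() copies one element to the host, blocking until the kernels
    # that produce seq_lens on the current stream have finished.
    _ = seq_lens[0].item()
    # Explicit barrier on the current stream, as in the diff above.
    torch.cuda.current_stream().synchronize()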