use non_blocking h2d in ForwardBatch.prepare_mlp_sync_batch. (#11605)
@@ -734,9 +734,8 @@ class ForwardBatch:
         self.encoder_lens = self._pad_tensor_to_size(self.encoder_lens, bs)
         self.positions = self._pad_tensor_to_size(self.positions, num_tokens)
         self.global_num_tokens_cpu = global_num_tokens
-        self.global_num_tokens_gpu = self.global_num_tokens_gpu.new_tensor(
-            global_num_tokens
-        )
+        global_num_tokens_pinned = torch.tensor(global_num_tokens, pin_memory=True)
+        self.global_num_tokens_gpu.copy_(global_num_tokens_pinned, non_blocking=True)
 
         if self.mrope_positions is not None:
             self.mrope_positions = self._pad_tensor_to_size(self.mrope_positions, bs)
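The diff replaces a synchronous host-to-device construction (Tensor.new_tensor on a Python list) with a two-step copy: stage the list in a pinned (page-locked) host tensor, then issue an asynchronous copy into the existing GPU tensor with non_blocking=True, so the host does not stall while the transfer is enqueued on the CUDA stream. Below is a minimal standalone sketch of that pattern; the helper name and buffer shapes are illustrative and not from the repository, only the torch calls (torch.tensor with pin_memory=True, Tensor.copy_ with non_blocking=True) are real APIs.

    import torch

    def copy_list_to_gpu(values, gpu_buffer):
        # Stage the Python list in pinned host memory so the H2D copy can be
        # issued asynchronously on the current CUDA stream.
        pinned = torch.tensor(values, dtype=gpu_buffer.dtype, pin_memory=True)
        # non_blocking=True returns control to the host immediately; the copy is
        # ordered on the stream, so later kernels reading gpu_buffer still see it.
        gpu_buffer.copy_(pinned, non_blocking=True)
        return gpu_buffer

    if __name__ == "__main__":
        if torch.cuda.is_available():
            buf = torch.empty(4, dtype=torch.int64, device="cuda")
            copy_list_to_gpu([1, 2, 3, 4], buf)
            torch.cuda.synchronize()  # only needed here so the print shows the copied values
            print(buf)

Note that non_blocking=True is only truly asynchronous when the source is pinned; copying from ordinary pageable host memory falls back to a synchronizing transfer, which is why the pinned staging tensor is created first.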