Use a non-blocking H2D copy in ForwardBatch.prepare_mlp_sync_batch. (#11605)

This commit is contained in:
strgrb
2025-10-15 02:30:59 +08:00
committed by GitHub
parent 9e8a15a74c
commit 94d26d850d

View File

@@ -734,9 +734,8 @@ class ForwardBatch:
 self.encoder_lens = self._pad_tensor_to_size(self.encoder_lens, bs)
 self.positions = self._pad_tensor_to_size(self.positions, num_tokens)
 self.global_num_tokens_cpu = global_num_tokens
-self.global_num_tokens_gpu = self.global_num_tokens_gpu.new_tensor(
-    global_num_tokens
-)
+global_num_tokens_pinned = torch.tensor(global_num_tokens, pin_memory=True)
+self.global_num_tokens_gpu.copy_(global_num_tokens_pinned, non_blocking=True)
 if self.mrope_positions is not None:
     self.mrope_positions = self._pad_tensor_to_size(self.mrope_positions, bs)