Support copying tensor from cpu to gpu without using copy engines (#10007)
This commit is contained in:
@@ -23,6 +23,7 @@ from sgl_kernel.cutlass_moe import cutlass_w4a8_moe_mm, get_cutlass_w4a8_moe_mm_
|
||||
from sgl_kernel.elementwise import (
|
||||
FusedSetKVBufferArg,
|
||||
apply_rope_with_cos_sin_cache_inplace,
|
||||
copy_to_gpu_no_ce,
|
||||
downcast_fp8,
|
||||
fused_add_rmsnorm,
|
||||
gelu_and_mul,
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
from dataclasses import dataclass
|
||||
from typing import Optional
|
||||
from typing import List, Optional
|
||||
|
||||
import torch
|
||||
from sgl_kernel.utils import get_cuda_stream, is_arch_support_pdl
|
||||
@@ -367,3 +367,7 @@ def downcast_fp8(
|
||||
torch.ops.sgl_kernel.downcast_fp8(
|
||||
k, v, k_out, v_out, k_scale, v_scale, loc, mult, offset, get_cuda_stream()
|
||||
)
|
||||
|
||||
|
||||
def copy_to_gpu_no_ce(input: List[int], output: torch.Tensor) -> None:
    """Copy host-side integer values into a GPU tensor without using copy engines (CE).

    Thin Python wrapper that delegates directly to the registered custom op
    ``torch.ops.sgl_kernel.copy_to_gpu_no_ce``; all actual copy logic lives in
    the compiled sgl_kernel extension.

    Args:
        input: Host-side list of ints to copy. NOTE(review): the op presumably
            writes these values into ``output`` element-wise — confirm against
            the kernel implementation; length/size matching is not checked here.
        output: Destination tensor. NOTE(review): assumed to reside on a CUDA
            device (the commit title says "cpu to gpu") — TODO confirm; no
            device or dtype validation is performed in this wrapper.

    Returns:
        None. ``output`` is mutated in place by the underlying op.
    """
    torch.ops.sgl_kernel.copy_to_gpu_no_ce(input, output)
|
||||
|
||||
Reference in New Issue
Block a user