[1/n] apply wna16marlin kernel in moe weight only quantization (#7683)
Co-authored-by: 晟海 <huangtingwei.htw@antgroup.com> Co-authored-by: yych0745 <1398089567@qq.com> Co-authored-by: HandH1998 <1335248067@qq.com> Co-authored-by: 弋云 <yiyun.wyt@antgroup.com> Co-authored-by: walker-ai <2398833647@qq.com>
This commit is contained in:
14
test/srt/test_w4a8.py
Normal file
14
test/srt/test_w4a8.py
Normal file
@@ -0,0 +1,14 @@
|
||||
import sgl_kernel
import torch

# Smoke test for the w4a8 GEMM custom op: build dummy CUDA inputs and make a
# single call through torch.ops to confirm the kernel is registered and
# dispatches without crashing. No numerical correctness is asserted here.
# NOTE(review): shapes below (10x10 activations/weights, length-10 scale and
# zero-point vectors) are placeholders — presumably the real kernel contract
# expects quantized weights, not randn floats; confirm against the op schema.

_DEV = "cuda"

# Activation matrix and (placeholder) quantized weight matrix.
x = torch.randn(10, 10, device=_DEV)
qweight = torch.randn(10, 10, device=_DEV)

# Per-channel scale / zero-point style auxiliary tensors plus the
# pre-allocated output buffer the kernel writes into.
s1_scales = torch.randn(10, device=_DEV)
input_scales = torch.randn(10, device=_DEV)
s1_szeros = torch.randn(10, device=_DEV)
input_sum = torch.randn(10, device=_DEV)
output_buffer = torch.randn(10, device=_DEV)

# Invoke the registered custom op directly via its default overload.
torch.ops.sgl_kernel.gemm_forward_cuda.default(
    x, qweight, s1_scales, input_scales, s1_szeros, input_sum, output_buffer
)
|
||||
Reference in New Issue
Block a user