[7/n] decouple quantization impl from vllm dependency - gguf kernel (#11019)

This commit is contained in:
PGFLMG
2025-10-12 05:04:57 +08:00
committed by GitHub
parent b5dcfd4154
commit 8fdcd98efe
19 changed files with 7936 additions and 1 deletions

View File

@@ -271,6 +271,8 @@ set(SOURCES
"csrc/elementwise/topk.cu"
"csrc/common_extension.cc"
"csrc/quantization/gguf/gguf_kernel.cu"
"csrc/gemm/awq_kernel.cu"
"csrc/gemm/bmm_fp8.cu"
"csrc/gemm/dsv3_fused_a_gemm.cu"
@@ -306,6 +308,7 @@ set(SOURCES
"csrc/moe/marlin_moe_wna16/ops.cu"
"csrc/moe/moe_align_kernel.cu"
"csrc/moe/moe_fused_gate.cu"
"csrc/moe/moe_sum.cu"
"csrc/moe/moe_sum_reduce.cu"
"csrc/moe/moe_topk_softmax_kernels.cu"
"csrc/moe/nvfp4_blockwise_moe.cu"

View File

@@ -114,6 +114,37 @@ TORCH_LIBRARY_FRAGMENT(sgl_kernel, m) {
"cu_seqlens_q) -> ()");
m.impl("fast_topk_transform_fused", torch::kCUDA, &fast_topk_transform_interface);
/*
 * From gguf quantization
 */
m.def(
"ggml_dequantize(Tensor W, int type, SymInt m, SymInt n, ScalarType? "
"dtype) -> Tensor");
m.impl("ggml_dequantize", torch::kCUDA, &ggml_dequantize);
m.def(
"ggml_mul_mat_vec_a8(Tensor W, Tensor X, int type, SymInt row) "
"-> Tensor");
m.impl("ggml_mul_mat_vec_a8", torch::kCUDA, &ggml_mul_mat_vec_a8);
m.def("ggml_mul_mat_a8(Tensor W, Tensor X, int type, SymInt row) -> Tensor");
m.impl("ggml_mul_mat_a8", torch::kCUDA, &ggml_mul_mat_a8);
m.def(
"ggml_moe_a8(Tensor X, Tensor W, "
"Tensor sorted_token_ids, Tensor expert_ids, Tensor "
"num_tokens_post_padded, "
"int type, SymInt row, SymInt top_k, SymInt tokens) -> Tensor");
m.impl("ggml_moe_a8", torch::kCUDA, &ggml_moe_a8);
m.def(
"ggml_moe_a8_vec(Tensor X, Tensor W, "
"Tensor topk_ids, int top_k, "
"int type, SymInt row, SymInt tokens) -> Tensor");
m.impl("ggml_moe_a8_vec", torch::kCUDA, &ggml_moe_a8_vec);
m.def("ggml_moe_get_block_size", &ggml_moe_get_block_size);
/*
* From csrc/gemm
*/
@@ -226,17 +257,23 @@ TORCH_LIBRARY_FRAGMENT(sgl_kernel, m) {
m.def("moe_sum_reduce(Tensor input, Tensor output, float routed_scaling_factor) -> ()");
m.impl("moe_sum_reduce", torch::kCUDA, &moe_sum_reduce);
m.def("moe_sum(Tensor input, Tensor! output) -> ()");
m.impl("moe_sum", torch::kCUDA, &moe_sum);
m.def(
"moe_fused_gate(Tensor input, Tensor bias, int num_expert_group, int topk_group, int topk, int "
"num_fused_shared_experts, float routed_scaling_factor, bool apply_routed_scaling_factor_on_output) -> "
"(Tensor[])");
m.impl("moe_fused_gate", torch::kCUDA, &moe_fused_gate);
m.def(
"fp8_blockwise_scaled_grouped_mm(Tensor output, Tensor a_ptrs, Tensor b_ptrs, Tensor out_ptrs, Tensor "
"a_scales_ptrs, Tensor b_scales_ptrs, Tensor a, Tensor b, Tensor scales_a, Tensor scales_b, Tensor "
"stride_a, Tensor stride_b, Tensor stride_c, Tensor layout_sfa, Tensor layout_sfb, Tensor problem_sizes, Tensor "
"expert_offsets, Tensor workspace) -> ()");
m.impl("fp8_blockwise_scaled_grouped_mm", torch::kCUDA, &fp8_blockwise_scaled_grouped_mm);
m.def(
"prepare_moe_input(Tensor topk_ids, Tensor expert_offsets, Tensor? blockscale_offsets, Tensor problem_sizes1,"
" Tensor problem_sizes2, Tensor input_permutation, Tensor output_permutation, int num_experts, int n, int k) -> "

View File

@@ -0,0 +1,66 @@
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <torch/all.h>
#include <ATen/cuda/Atomic.cuh>
#include <cub/cub.cuh>
#include "utils.h"
// Sums the top-k expert outputs for each token: out[t, :] = sum_k input[t, k, :].
// Launch layout: one block per token (gridDim.x = num_tokens); threads stride
// cooperatively over the hidden dimension d. TOPK is a compile-time constant so
// the inner accumulation loop fully unrolls.
template <typename scalar_t, int TOPK>
__global__ void moe_sum_kernel(
    scalar_t* __restrict__ out,          // [..., d]
    const scalar_t* __restrict__ input,  // [..., topk, d]
    const int d) {
  const int64_t token = blockIdx.x;
  const scalar_t* token_in = input + token * TOPK * d;
  scalar_t* token_out = out + token * d;
  for (int64_t col = threadIdx.x; col < d; col += blockDim.x) {
    scalar_t acc = 0.0;
#pragma unroll
    for (int expert = 0; expert < TOPK; ++expert) {
      // Read-only load through the texture/read-only cache path.
      acc += SGLANG_LDG(&token_in[expert * d + col]);
    }
    token_out[col] = acc;
  }
}
// Row-sum over the top-k dimension: output[t, :] = sum_k input[t, k, :].
//
// input  : [num_tokens, topk, hidden_size]
// output : [num_tokens, hidden_size], pre-allocated, same dtype as input.
//
// topk in {2, 3, 4} uses a fused CUDA kernel (one block per token); any other
// topk falls back to at::sum_out along dim 1.
void moe_sum(
    torch::Tensor& input,   // [num_tokens, topk, hidden_size]
    torch::Tensor& output)  // [num_tokens, hidden_size]
{
  // Validate the shape contract instead of silently mis-indexing: the original
  // code read input.size(1) without checking input is 3-D or that the two
  // tensors agree on dtype / hidden size / token count.
  TORCH_CHECK(input.dim() == 3, "moe_sum: input must be 3-D [num_tokens, topk, hidden_size]");
  TORCH_CHECK(input.scalar_type() == output.scalar_type(), "moe_sum: input/output dtype mismatch");
  const int hidden_size = input.size(-1);
  TORCH_CHECK(output.size(-1) == hidden_size, "moe_sum: hidden_size mismatch between input and output");
  const auto num_tokens = output.numel() / hidden_size;
  TORCH_CHECK(input.size(0) == num_tokens, "moe_sum: token count mismatch between input and output");
  const int topk = input.size(1);
  // One block per token; up to 1024 threads stride the hidden dimension.
  dim3 grid(num_tokens);
  dim3 block(std::min(hidden_size, 1024));
  const at::cuda::OptionalCUDAGuard device_guard(device_of(output));
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  // NOTE(review): the fused kernels index assuming contiguous row-major
  // input/output — confirm all callers pass contiguous tensors.
  switch (topk) {
    case 2:
      DISPATCH_FLOAT_TYPES(input.scalar_type(), "moe_sum_kernel", [&] {
        moe_sum_kernel<scalar_t, 2>
            <<<grid, block, 0, stream>>>(output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), hidden_size);
      });
      break;
    case 3:
      DISPATCH_FLOAT_TYPES(input.scalar_type(), "moe_sum_kernel", [&] {
        moe_sum_kernel<scalar_t, 3>
            <<<grid, block, 0, stream>>>(output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), hidden_size);
      });
      break;
    case 4:
      DISPATCH_FLOAT_TYPES(input.scalar_type(), "moe_sum_kernel", [&] {
        moe_sum_kernel<scalar_t, 4>
            <<<grid, block, 0, stream>>>(output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), hidden_size);
      });
      break;
    default:
      // Generic fallback for any other topk value.
      at::sum_out(output, input, 1);
      break;
  }
}

View File

@@ -0,0 +1,583 @@
// copied from
// https://github.com/vllm-project/vllm/blob/4492e3a55428e161ca8db381edc28263e5da4c8d/csrc/quantization/gguf/dequantize.cuh
// copied and adapted from https://github.com/ggerganov/llama.cpp/blob/b2899/ggml-cuda/convert.cu
// Dequant functions
// Dequantize one q4_0 byte: two 4-bit values sharing a per-block half scale d.
// Values are recentered from [0,15] to [-8,7] before scaling.
static __device__ __forceinline__ void dequantize_q4_0(const void* vx, const int ib, const int iqs, dfloat2& v) {
  const block_q4_0* x = (const block_q4_0*)vx;
  const dfloat d = x[ib].d;
  const int vui = x[ib].qs[iqs];
  v.x = __int2half_rn(vui & 0xF);  // low nibble
  v.y = __int2half_rn(vui >> 4);   // high nibble
  v = __hsub2(v, __floats2half2_rn(8.0f, 8.0f));  // recenter
  v = __hmul2(v, {d, d});
}
// Dequantize one q4_1 byte: two 4-bit values with per-block scale d and
// offset m packed together in the half2 field dm.
static __device__ __forceinline__ void dequantize_q4_1(const void* vx, const int ib, const int iqs, dfloat2& v) {
  const block_q4_1* x = (const block_q4_1*)vx;
  const dfloat d = __low2half(x[ib].dm);   // scale
  const dfloat m = __high2half(x[ib].dm);  // offset
  const int vui = x[ib].qs[iqs];
  v.x = __int2half_rn(vui & 0xF);
  v.y = __int2half_rn(vui >> 4);
  v = __hmul2(v, {d, d});
  v = __hadd2(v, {m, m});
}
// Dequantize one q5_0 element pair: 4 low bits from qs plus a 5th bit taken
// from the 32-bit high-bit bitmap qh; recentered from [0,31] to [-16,15].
static __device__ __forceinline__ void dequantize_q5_0(const void* vx, const int ib, const int iqs, dfloat2& v) {
  const block_q5_0* x = (const block_q5_0*)vx;
  const dfloat d = x[ib].d;
  uint32_t qh;
  memcpy(&qh, x[ib].qh, sizeof(qh));  // qh may be unaligned; memcpy is safe
  const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10;   // 5th bit of first value
  const int xh_1 = ((qh >> (iqs + 12))) & 0x10;       // 5th bit of second value
  v.x = __int2half_rn((x[ib].qs[iqs] & 0xf) | xh_0);
  v.y = __int2half_rn((x[ib].qs[iqs] >> 4) | xh_1);
  v = __hsub2(v, __floats2half2_rn(16.0f, 16.0f));
  v = __hmul2(v, {d, d});
}
// Dequantize one q5_1 element pair: like q5_0 but with scale+offset (dm)
// instead of a symmetric recenter.
static __device__ __forceinline__ void dequantize_q5_1(const void* vx, const int ib, const int iqs, dfloat2& v) {
  const block_q5_1* x = (const block_q5_1*)vx;
  const dfloat d = __low2half(x[ib].dm);
  const dfloat m = __high2half(x[ib].dm);
  uint32_t qh;
  memcpy(&qh, x[ib].qh, sizeof(qh));
  const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10;
  const int xh_1 = ((qh >> (iqs + 12))) & 0x10;
  v.x = __int2half_rn((x[ib].qs[iqs] & 0xf) | xh_0);
  v.y = __int2half_rn((x[ib].qs[iqs] >> 4) | xh_1);
  v = __hmul2(v, {d, d});
  v = __hadd2(v, {m, m});
}
// Dequantize two adjacent q8_0 values: plain int8 quants times block scale d.
static __device__ __forceinline__ void dequantize_q8_0(const void* vx, const int ib, const int iqs, dfloat2& v) {
  const block_q8_0* x = (const block_q8_0*)vx;
  const dfloat d = x[ib].d;
  v.x = __int2half_rn(x[ib].qs[iqs + 0]);
  v.y = __int2half_rn(x[ib].qs[iqs + 1]);
  v = __hmul2(v, {d, d});
}
// Generic dequantizer for the "simple" (non-K) quant formats. Each thread
// expands one quant pair into two output values via the per-format
// dequantize_kernel. qk = elements per quant block, qr = quant ratio
// (elements packed per stored byte).
template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
static __global__ void dequantize_block(const void* __restrict__ vx, dst_t* __restrict__ y, const int k) {
  const int elem = 2 * (blockDim.x * blockIdx.x + threadIdx.x);
  if (elem >= k) {
    return;
  }
  const int block_idx = elem / qk;         // quant-block index
  const int quant_idx = (elem % qk) / qr;  // quant index within the block
  const int y_block_start = elem - elem % qk;
  // The second value of the pair lands half a block away except for q8_0
  // (qr == 1), where the pair is adjacent.
  constexpr int y_offset = (qr == 1) ? 1 : qk / 2;
  dfloat2 v;
  dequantize_kernel(vx, block_idx, quant_idx, v);
  y[y_block_start + quant_idx + 0] = convert_from_half<dst_t>(v.x);
  y[y_block_start + quant_idx + y_offset] = convert_from_half<dst_t>(v.y);
}
// Dequantize one q2_K super-block (QK_K values). Launch: one block per
// super-block, 64 threads; each thread writes 4 outputs spaced 32 apart.
// Scales are 4-bit with a 4-bit min packed per byte; dall/dmin come from dm.
template <typename dst_t>
static __global__ void dequantize_block_q2_K(const void* __restrict__ vx, dst_t* __restrict__ yy) {
  const auto i = blockIdx.x;
  const block_q2_K* x = (const block_q2_K*)vx;
  const auto tid = threadIdx.x;
  const int n = tid / 32;        // which 128-element half
  const int l = tid - 32 * n;    // lane within the half
  const int is = 8 * n + l / 16; // scale index
  const uint8_t q = x[i].qs[32 * n + l];
  dst_t* y = yy + i * QK_K + 128 * n;
  half dall = __low2half(x[i].dm);   // super-block scale
  half dmin = __high2half(x[i].dm);  // super-block min
  y[l + 0] = convert_from_half<dst_t>(__hsub(
      __hmul(dall, __int2half_rn((x[i].scales[is + 0] & 0xF) * ((q >> 0) & 3))),
      __hmul(dmin, __int2half_rn(x[i].scales[is + 0] >> 4))));
  y[l + 32] = convert_from_half<dst_t>(__hsub(
      __hmul(dall, __int2half_rn((x[i].scales[is + 2] & 0xF) * ((q >> 2) & 3))),
      __hmul(dmin, __int2half_rn(x[i].scales[is + 2] >> 4))));
  y[l + 64] = convert_from_half<dst_t>(__hsub(
      __hmul(dall, __int2half_rn((x[i].scales[is + 4] & 0xF) * ((q >> 4) & 3))),
      __hmul(dmin, __int2half_rn(x[i].scales[is + 4] >> 4))));
  y[l + 96] = convert_from_half<dst_t>(__hsub(
      __hmul(dall, __int2half_rn((x[i].scales[is + 6] & 0xF) * ((q >> 6) & 3))),
      __hmul(dmin, __int2half_rn(x[i].scales[is + 6] >> 4))));
}
// Dequantize one q3_K super-block. Launch: one block per super-block,
// 64 threads; each thread writes 4 consecutive outputs. The 6-bit scales are
// reassembled from two packed 4-bit/2-bit fields (the nested ternary below),
// and hmask supplies the "high" third bit that shifts values by -4.
template <typename dst_t>
static __global__ void dequantize_block_q3_K(const void* __restrict__ vx, dst_t* __restrict__ yy) {
  const auto i = blockIdx.x;
  const block_q3_K* x = (const block_q3_K*)vx;
  const auto r = threadIdx.x / 4;
  const int tid = r / 2;
  const int is0 = r % 2;
  const int l0 = 16 * is0 + 4 * (threadIdx.x % 4);  // first of 4 output lanes
  const int n = tid / 4;
  const int j = tid - 4 * n;
  uint8_t m = 1 << (4 * n + j);  // hmask bit for this 32-value group
  int is = 8 * n + 2 * j + is0;
  int shift = 2 * j;
  // Reconstruct the 6-bit scale from the packed scales[] layout.
  int8_t us = is < 4 ? (x[i].scales[is - 0] & 0xF) | (((x[i].scales[is + 8] >> 0) & 3) << 4)
      : is < 8    ? (x[i].scales[is - 0] & 0xF) | (((x[i].scales[is + 4] >> 2) & 3) << 4)
      : is < 12   ? (x[i].scales[is - 8] >> 4) | (((x[i].scales[is + 0] >> 4) & 3) << 4)
                  : (x[i].scales[is - 8] >> 4) | (((x[i].scales[is - 4] >> 6) & 3) << 4);
  half d_all = x[i].d;
  half dl = __hmul(d_all, __int2half_rn(us - 32));  // scale recentered by -32
  dst_t* y = yy + i * QK_K + 128 * n + 32 * j;
  const uint8_t* q = x[i].qs + 32 * n;
  const uint8_t* hm = x[i].hmask;
  for (int l = l0; l < l0 + 4; ++l) {
    // 2-bit quant plus hmask bit: value range [-4, 3] before scaling.
    y[l] = convert_from_half<dst_t>(__hmul(dl, __int2half_rn((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4))));
  }
}
// Unpack the j-th 6-bit (scale, min) pair from the packed 12-byte scales array
// used by q4_K / q5_K. The first 4 pairs live in the low 6 bits of q[0..7];
// the remaining pairs are split across nibbles plus the high 2 bits of the
// earlier bytes.
static inline __device__ void get_scale_min_k4(int j, const uint8_t* q, uint8_t& d, uint8_t& m) {
  if (j < 4) {
    d = q[j] & 63;
    m = q[j + 4] & 63;
  } else {
    d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
    m = (q[j + 4] >> 4) | ((q[j - 0] >> 6) << 4);
  }
}
// Dequantize one q4_K super-block. Launch: one block per super-block,
// 32 threads; each thread writes 2x4 outputs (low/high nibbles, 32 apart).
// Per-32-value 6-bit scale/min pairs come from get_scale_min_k4.
template <typename dst_t>
static __global__ void dequantize_block_q4_K(const void* __restrict__ vx, dst_t* __restrict__ yy) {
  const block_q4_K* x = (const block_q4_K*)vx;
  const auto i = blockIdx.x;
  // assume 32 threads
  const auto tid = threadIdx.x;
  const int il = tid / 8;
  const int ir = tid % 8;
  const int is = 2 * il;  // scale-pair index
  const int n = 4;        // values per thread per nibble
  dst_t* y = yy + i * QK_K + 64 * il + n * ir;
  const half dall = __low2half(x[i].dm);   // super-block scale
  const half dmin = __high2half(x[i].dm);  // super-block min
  const uint8_t* q = x[i].qs + 32 * il + n * ir;
  uint8_t sc, m;
  get_scale_min_k4(is + 0, x[i].scales, sc, m);
  const half d1 = __hmul(dall, __int2half_rn(sc));
  const half m1 = __hmul(dmin, __int2half_rn(m));
  get_scale_min_k4(is + 1, x[i].scales, sc, m);
  const half d2 = __hmul(dall, __int2half_rn(sc));
  const half m2 = __hmul(dmin, __int2half_rn(m));
  for (int l = 0; l < n; ++l) {
    y[l + 0] = convert_from_half<dst_t>(__hsub(__hmul(d1, __int2half_rn(q[l] & 0xF)), m1));
    y[l + 32] = convert_from_half<dst_t>(__hsub(__hmul(d2, __int2half_rn(q[l] >> 4)), m2));
  }
}
// Dequantize one q5_K super-block. Launch: one block per super-block,
// 64 threads; each thread writes 4 outputs (2 low-nibble, 2 high-nibble).
// The 5th quant bit comes from qh; scale/min pairs from get_scale_min_k4.
template <typename dst_t>
static __global__ void dequantize_block_q5_K(const void* __restrict__ vx, dst_t* __restrict__ yy) {
  const block_q5_K* x = (const block_q5_K*)vx;
  const auto i = blockIdx.x;
  // assume 64 threads - this is very slightly better than the one below
  const auto tid = threadIdx.x;
  const int il = tid / 16;  // il is in 0...3
  const int ir = tid % 16;  // ir is in 0...15
  const int is = 2 * il;    // is is in 0...6
  dst_t* y = yy + i * QK_K + 64 * il + 2 * ir;
  const half dall = __low2half(x[i].dm);
  const half dmin = __high2half(x[i].dm);
  const uint8_t* ql = x[i].qs + 32 * il + 2 * ir;  // low 4 bits
  const uint8_t* qh = x[i].qh + 2 * ir;            // high-bit bitmap
  uint8_t sc, m;
  get_scale_min_k4(is + 0, x[i].scales, sc, m);
  const half d1 = __hmul(dall, __int2half_rn(sc));
  const half m1 = __hmul(dmin, __int2half_rn(m));
  get_scale_min_k4(is + 1, x[i].scales, sc, m);
  const half d2 = __hmul(dall, __int2half_rn(sc));
  const half m2 = __hmul(dmin, __int2half_rn(m));
  uint8_t hm = 1 << (2 * il);  // which qh bit holds the 5th quant bit
  y[0] = convert_from_half<dst_t>(__hsub(__hmul(d1, __int2half_rn((ql[0] & 0xF) + (qh[0] & hm ? 16 : 0))), m1));
  y[1] = convert_from_half<dst_t>(__hsub(__hmul(d1, __int2half_rn((ql[1] & 0xF) + (qh[1] & hm ? 16 : 0))), m1));
  hm <<= 1;
  y[32] = convert_from_half<dst_t>(__hsub(__hmul(d2, __int2half_rn((ql[0] >> 4) + (qh[0] & hm ? 16 : 0))), m2));
  y[33] = convert_from_half<dst_t>(__hsub(__hmul(d2, __int2half_rn((ql[1] >> 4) + (qh[1] & hm ? 16 : 0))), m2));
}
// Dequantize one q6_K super-block. Launch: one block per super-block,
// 64 threads; each thread writes 4 outputs spaced 32 apart. A 6-bit quant is
// built from 4 low bits (ql) + 2 high bits (qh), recentered by -32, and
// multiplied by a signed 8-bit sub-scale and the super-block scale d.
template <typename dst_t>
static __global__ void dequantize_block_q6_K(const void* __restrict__ vx, dst_t* __restrict__ yy) {
  const block_q6_K* x = (const block_q6_K*)vx;
  const auto i = blockIdx.x;
  // assume 64 threads - this is very slightly better than the one below
  const auto tid = threadIdx.x;
  const int ip = tid / 32;       // ip is 0 or 1
  const int il = tid - 32 * ip;  // 0...32
  const int is = 8 * ip + il / 16;
  dst_t* y = yy + i * QK_K + 128 * ip + il;
  const half d = x[i].d;
  const uint8_t* ql = x[i].ql + 64 * ip + il;
  const uint8_t qh = x[i].qh[32 * ip + il];
  const int8_t* sc = x[i].scales + is;
  y[0] = convert_from_half<dst_t>(
      __hmul(d, __int2half_rn(sc[0] * ((int8_t)((ql[0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32))));
  y[32] = convert_from_half<dst_t>(
      __hmul(d, __int2half_rn(sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32))));
  y[64] = convert_from_half<dst_t>(
      __hmul(d, __int2half_rn(sc[4] * ((int8_t)((ql[0] >> 4) | (((qh >> 4) & 3) << 4)) - 32))));
  y[96] = convert_from_half<dst_t>(
      __hmul(d, __int2half_rn(sc[6] * ((int8_t)((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32))));
}
// Dequantize one iq2_xxs super-block. Launch: one block per super-block,
// 32 threads; each thread writes 8 outputs. Values come from the shared
// iq2xxs_grid codebook; signs from the ksigns_iq2xs table; a 4-bit scale is
// packed into the top bits of aux32.
template <typename dst_t>
static __global__ void dequantize_block_iq2_xxs(const void* __restrict__ vx, dst_t* __restrict__ yy) {
  const auto i = blockIdx.x;
  const block_iq2_xxs* x = (const block_iq2_xxs*)vx;
  const auto tid = threadIdx.x;
  const int il = tid / 8;  // 0...3
  const int ib = tid % 8;  // 0...7
  dst_t* y = yy + i * QK_K + 32 * ib + 8 * il;
  const uint16_t* q2 = x[i].qs + 4 * ib;
  const uint8_t* aux8 = (const uint8_t*)q2;
  const uint8_t* grid = (const uint8_t*)(iq2xxs_grid + aux8[il]);  // codebook row
  const uint32_t aux32 = q2[2] | (q2[3] << 16);  // packed scale + sign indices
  const float d = __half2float(x[i].d) * (0.5f + (aux32 >> 28)) * 0.25f;
  const uint8_t signs = ksigns_iq2xs[(aux32 >> 7 * il) & 127];
  for (int j = 0; j < 8; ++j)
    y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
}
// Dequantize one iq2_xs super-block. Like iq2_xxs but with a 9-bit codebook
// index per group (low 9 bits of q2) and explicit per-group 4-bit scales.
template <typename dst_t>
static __global__ void dequantize_block_iq2_xs(const void* __restrict__ vx, dst_t* __restrict__ yy) {
  const auto i = blockIdx.x;
  const block_iq2_xs* x = (const block_iq2_xs*)vx;
  const auto tid = threadIdx.x;
  const int il = tid / 8;  // 0...3
  const int ib = tid % 8;  // 0...7
  dst_t* y = yy + i * QK_K + 32 * ib + 8 * il;
  const uint16_t* q2 = x[i].qs + 4 * ib;
  const uint8_t* grid = (const uint8_t*)(iq2xs_grid + (q2[il] & 511));  // 9-bit index
  const float d = __half2float(x[i].d) * (0.5f + ((x[i].scales[ib] >> 4 * (il / 2)) & 0xf)) * 0.25f;
  const uint8_t signs = ksigns_iq2xs[q2[il] >> 9];  // top 7 bits select signs
  for (int j = 0; j < 8; ++j)
    y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
}
// Dequantize one iq2_s super-block. The 10-bit codebook index is split across
// qs (low 8 bits) and qh (high 2 bits); sign bytes are stored explicitly in
// the second half of qs rather than via ksigns.
template <typename dst_t>
static __global__ void dequantize_block_iq2_s(const void* __restrict__ vx, dst_t* __restrict__ yy) {
  const auto i = blockIdx.x;
  const block_iq2_s* x = (const block_iq2_s*)vx;
  const auto tid = threadIdx.x;
  const int il = tid / 8;  // 0...3
  const int ib = tid % 8;  // 0...7
  dst_t* y = yy + i * QK_K + 32 * ib + 8 * il;
  const uint8_t* grid = (const uint8_t*)(iq2s_grid + (x[i].qs[4 * ib + il] | ((x[i].qh[ib] << (8 - 2 * il)) & 0x300)));
  const float d = __half2float(x[i].d) * (0.5f + ((x[i].scales[ib] >> 4 * (il / 2)) & 0xf)) * 0.25f;
  const uint8_t signs = x[i].qs[QK_K / 8 + 4 * ib + il];  // explicit sign byte
  for (int j = 0; j < 8; ++j)
    y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
}
// Dequantize one iq3_xxs super-block. Each thread expands two 4-value codebook
// rows (grid1/grid2); the 4-bit scale and sign indices are packed in aux32,
// which lives after the codebook bytes inside qs.
template <typename dst_t>
static __global__ void dequantize_block_iq3_xxs(const void* __restrict__ vx, dst_t* __restrict__ yy) {
  const auto i = blockIdx.x;
  const block_iq3_xxs* x = (const block_iq3_xxs*)vx;
  const auto tid = threadIdx.x;
  const int il = tid / 8;  // 0...3
  const int ib = tid % 8;  // 0...7
  dst_t* y = yy + i * QK_K + 32 * ib + 8 * il;
  const uint8_t* q3 = x[i].qs + 8 * ib;
  const uint16_t* gas = (const uint16_t*)(x[i].qs + QK_K / 4) + 2 * ib;
  const uint8_t* grid1 = (const uint8_t*)(iq3xxs_grid + q3[2 * il + 0]);
  const uint8_t* grid2 = (const uint8_t*)(iq3xxs_grid + q3[2 * il + 1]);
  const uint32_t aux32 = gas[0] | (gas[1] << 16);
  const float d = __half2float(x[i].d) * (0.5f + (aux32 >> 28)) * 0.5f;
  const uint8_t signs = ksigns_iq2xs[(aux32 >> 7 * il) & 127];
  for (int j = 0; j < 4; ++j) {
    y[j + 0] = d * grid1[j] * (signs & kmask_iq2xs[j + 0] ? -1.f : 1.f);
    y[j + 4] = d * grid2[j] * (signs & kmask_iq2xs[j + 4] ? -1.f : 1.f);
  }
}
// Dequantize one iq3_s super-block. 9-bit codebook indices are split across qs
// and qh; sign bytes are explicit in x[i].signs; per-pair 4-bit scales.
template <typename dst_t>
static __global__ void dequantize_block_iq3_s(const void* __restrict__ vx, dst_t* __restrict__ yy) {
  const auto i = blockIdx.x;
  const block_iq3_s* x = (const block_iq3_s*)vx;
  const auto tid = threadIdx.x;
  const int il = tid / 8;  // 0...3
  const int ib = tid % 8;  // 0...7
  dst_t* y = yy + i * QK_K + 32 * ib + 8 * il;
  const uint8_t* qs = x[i].qs + 8 * ib;
  const uint8_t* grid1 = (const uint8_t*)(iq3xs_grid + (qs[2 * il + 0] | ((x[i].qh[ib] << (8 - 2 * il)) & 256)));
  const uint8_t* grid2 = (const uint8_t*)(iq3xs_grid + (qs[2 * il + 1] | ((x[i].qh[ib] << (7 - 2 * il)) & 256)));
  const float d = __half2float(x[i].d) * (0.5f + ((x[i].scales[ib / 2] >> 4 * (ib % 2)) & 0xf)) * 0.5f;
  const uint8_t signs = x[i].signs[4 * ib + il];
  for (int j = 0; j < 4; ++j) {
    y[j + 0] = d * grid1[j] * (signs & kmask_iq2xs[j + 0] ? -1.f : 1.f);
    y[j + 4] = d * grid2[j] * (signs & kmask_iq2xs[j + 4] ? -1.f : 1.f);
  }
}
// Dequantize one iq1_s super-block. An 11-bit codebook index (8 bits from qs,
// 3 from qh) selects packed 4-bit grid values; the sign of IQ1S_DELTA comes
// from qh's top bit and the 3-bit scale from qh bits 12..14.
template <typename dst_t>
static __global__ void dequantize_block_iq1_s(const void* __restrict__ vx, dst_t* __restrict__ yy) {
  const int64_t i = blockIdx.x;
  const block_iq1_s* x = (const block_iq1_s*)vx;
  const int64_t tid = threadIdx.x;
  const int64_t il = tid / 8;  // 0...3
  const int64_t ib = tid % 8;  // 0...7
  dst_t* y = yy + i * QK_K + 32 * ib + 8 * il;
  const float delta = x[i].qh[ib] & 0x8000 ? -1 - IQ1S_DELTA : -1 + IQ1S_DELTA;
  const float d = __half2float(x[i].d) * (2 * ((x[i].qh[ib] >> 12) & 7) + 1);
  uint32_t grid32[2];
  const int8_t* q = (const int8_t*)grid32;
  grid32[0] = iq1s_grid_gpu[x[i].qs[4 * ib + il] | (((x[i].qh[ib] >> 3 * il) & 7) << 8)];
  // Split the packed nibbles into two words of 8 x 4-bit values.
  grid32[1] = (grid32[0] >> 4) & 0x0f0f0f0f;
  grid32[0] &= 0x0f0f0f0f;
  for (int j = 0; j < 8; ++j) {
    y[j] = d * (q[j] + delta);
  }
}
// Dequantize one iq1_m super-block. The super-block scale's 16 bits are
// scattered across the top nibbles of four scale words and reassembled into
// iq1m_scale_t; per-group 3-bit scales and the IQ1M_DELTA sign come from the
// scale words and qh nibbles respectively.
template <typename dst_t>
static __global__ void dequantize_block_iq1_m(const void* __restrict__ vx, dst_t* __restrict__ yy) {
  const int64_t i = blockIdx.x;
  const block_iq1_m* x = (const block_iq1_m*)vx;
  const int64_t tid = threadIdx.x;
  const int64_t il = tid / 8;  // 0...3
  const int64_t ib = tid % 8;  // 0...7
  dst_t* y = yy + i * QK_K + 32 * ib + 8 * il;
  const uint16_t* sc = (const uint16_t*)x[i].scales;
  iq1m_scale_t scale;
  // Reassemble the fp16 super-block scale from the four top nibbles.
  scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
  const int64_t ib16 = 2 * ib + il / 2;  // sc[ib16/4] >> 3*(ib16%4) -> sc[ib/2] >> 3*((2*ib+il/2)%4);
  const float d = __half2float(scale.f16) * (2 * ((sc[ib16 / 4] >> 3 * (ib16 % 4)) & 0x7) + 1);
  const float delta = x[i].qh[2 * ib + il / 2] & (0x08 << 4 * (il % 2)) ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA;
  uint32_t grid32[2];
  const int8_t* q = (const int8_t*)grid32;
  grid32[0] = iq1s_grid_gpu[x[i].qs[4 * ib + il] | (((x[i].qh[2 * ib + il / 2] >> 4 * (il % 2)) & 7) << 8)];
  grid32[1] = (grid32[0] >> 4) & 0x0f0f0f0f;
  grid32[0] &= 0x0f0f0f0f;
  for (int j = 0; j < 8; ++j) {
    y[j] = d * (q[j] + delta);
  }
}
// Dequantize QK_K values of iq4_nl data per CUDA block via the non-linear
// 4-bit lookup table kvalues_iq4nl.
// NOTE(review): x is advanced by i * (QK_K / QK4_NL) small blocks and then
// indexed by ib (0..7), so each CUDA block covers QK_K/QK4_NL = 8 iq4_nl
// blocks — confirm QK4_NL == 32 so the output indexing (32*ib + 4*il,
// y[j]/y[j+16]) lines up.
template <typename dst_t>
static __global__ void dequantize_block_iq4_nl(const void* __restrict__ vx, dst_t* __restrict__ yy) {
  const auto i = blockIdx.x;
  const block_iq4_nl* x = (const block_iq4_nl*)vx + i * (QK_K / QK4_NL);
  const auto tid = threadIdx.x;
  const int il = tid / 8;  // 0...3
  const int ib = tid % 8;  // 0...7
  dst_t* y = yy + i * QK_K + 32 * ib + 4 * il;
  const uint8_t* q4 = x[ib].qs + 4 * il;
  const float d = __half2float(x[ib].d);
  for (int j = 0; j < 4; ++j) {
    y[j + 0] = d * kvalues_iq4nl[q4[j] & 0xf];   // low nibble
    y[j + 16] = d * kvalues_iq4nl[q4[j] >> 4];   // high nibble
  }
}
// Dequantize one iq4_xs super-block using the kvalues_iq4nl lookup table.
// The 6-bit per-group scale is split between scales_l (low nibble) and
// scales_h (high 2 bits) and recentered by -32.
template <typename dst_t>
static __global__ void dequantize_block_iq4_xs(const void* __restrict__ vx, dst_t* __restrict__ yy) {
  const auto i = blockIdx.x;
  const block_iq4_xs* x = (const block_iq4_xs*)vx;
  const auto tid = threadIdx.x;
  const int il = tid / 8;  // 0...3
  const int ib = tid % 8;  // 0...7
  dst_t* y = yy + i * QK_K + 32 * ib + 4 * il;
  const uint8_t* q4 = x[i].qs + 16 * ib + 4 * il;
  const float d = __half2float(x[i].d) *
      ((((x[i].scales_l[ib / 2] >> 4 * (ib % 2)) & 0xf) | (((x[i].scales_h >> 2 * ib) & 3) << 4)) - 32);
  for (int j = 0; j < 4; ++j) {
    y[j + 0] = d * kvalues_iq4nl[q4[j] & 0xf];
    y[j + 16] = d * kvalues_iq4nl[q4[j] >> 4];
  }
}
// Host-side launcher for dequantize_block. Each thread emits two values, so
// the grid needs ceil(k / (2 * CUDA_DEQUANTIZE_BLOCK_SIZE)) blocks.
template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
static void
dequantize_block_cuda(const void* __restrict__ vx, dst_t* __restrict__ y, const int k, cudaStream_t stream) {
  const int elems_per_block = 2 * CUDA_DEQUANTIZE_BLOCK_SIZE;
  const int grid_size = (k + elems_per_block - 1) / elems_per_block;  // ceil-div
  dequantize_block<qk, qr, dequantize_kernel><<<grid_size, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
}
// Host-side launchers for the K-quant and IQ dequantize kernels: one CUDA
// block per QK_K-element super-block, with the thread count each kernel
// expects (64 for q2/q3/q5/q6_K, 32 otherwise).
// NOTE(review): the K-quant launchers divide k by QK_K with truncation, so k
// is presumably always a multiple of QK_K — the iq4 variants round up instead;
// confirm with callers.
template <typename dst_t>
static void dequantize_row_q2_K_cuda(const void* vx, dst_t* y, const int k, cudaStream_t stream) {
  dequantize_block_q2_K<<<k / QK_K, 64, 0, stream>>>(vx, y);
}
template <typename dst_t>
static void dequantize_row_q3_K_cuda(const void* vx, dst_t* y, const int k, cudaStream_t stream) {
  dequantize_block_q3_K<<<k / QK_K, 64, 0, stream>>>(vx, y);
}
template <typename dst_t>
static void dequantize_row_q4_K_cuda(const void* vx, dst_t* y, const int k, cudaStream_t stream) {
  dequantize_block_q4_K<<<k / QK_K, 32, 0, stream>>>(vx, y);
}
template <typename dst_t>
static void dequantize_row_q5_K_cuda(const void* vx, dst_t* y, const int k, cudaStream_t stream) {
  dequantize_block_q5_K<<<k / QK_K, 64, 0, stream>>>(vx, y);
}
template <typename dst_t>
static void dequantize_row_q6_K_cuda(const void* vx, dst_t* y, const int k, cudaStream_t stream) {
  dequantize_block_q6_K<<<k / QK_K, 64, 0, stream>>>(vx, y);
}
template <typename dst_t>
static void dequantize_row_iq2_xxs_cuda(const void* vx, dst_t* y, const int k, cudaStream_t stream) {
  dequantize_block_iq2_xxs<<<k / QK_K, 32, 0, stream>>>(vx, y);
}
template <typename dst_t>
static void dequantize_row_iq2_xs_cuda(const void* vx, dst_t* y, const int k, cudaStream_t stream) {
  dequantize_block_iq2_xs<<<k / QK_K, 32, 0, stream>>>(vx, y);
}
template <typename dst_t>
static void dequantize_row_iq2_s_cuda(const void* vx, dst_t* y, const int k, cudaStream_t stream) {
  dequantize_block_iq2_s<<<k / QK_K, 32, 0, stream>>>(vx, y);
}
template <typename dst_t>
static void dequantize_row_iq3_xxs_cuda(const void* vx, dst_t* y, const int k, cudaStream_t stream) {
  dequantize_block_iq3_xxs<<<k / QK_K, 32, 0, stream>>>(vx, y);
}
template <typename dst_t>
static void dequantize_row_iq3_s_cuda(const void* vx, dst_t* y, const int k, cudaStream_t stream) {
  dequantize_block_iq3_s<<<k / QK_K, 32, 0, stream>>>(vx, y);
}
template <typename dst_t>
static void dequantize_row_iq1_s_cuda(const void* vx, dst_t* y, const int k, cudaStream_t stream) {
  dequantize_block_iq1_s<<<k / QK_K, 32, 0, stream>>>(vx, y);
}
template <typename dst_t>
static void dequantize_row_iq1_m_cuda(const void* vx, dst_t* y, const int k, cudaStream_t stream) {
  dequantize_block_iq1_m<<<k / QK_K, 32, 0, stream>>>(vx, y);
}
template <typename dst_t>
static void dequantize_row_iq4_nl_cuda(const void* vx, dst_t* y, const int k, cudaStream_t stream) {
  dequantize_block_iq4_nl<<<(k + QK_K - 1) / QK_K, 32, 0, stream>>>(vx, y);
}
template <typename dst_t>
static void dequantize_row_iq4_xs_cuda(const void* vx, dst_t* y, const int k, cudaStream_t stream) {
  dequantize_block_iq4_xs<<<(k + QK_K - 1) / QK_K, 32, 0, stream>>>(vx, y);
}
// Maps a GGUF quantization type id to the matching dequantize launcher for
// dst_t, or nullptr when the id is unsupported (callers must check).
// Type-id -> format mapping is inferred from the returned launchers.
template <typename dst_t>
static to_cuda_ggml_t<dst_t> ggml_get_to_cuda(int64_t type) {
  switch (type) {
    case 2:  // Q4_0
      return dequantize_block_cuda<QK4_0, QR4_0, dequantize_q4_0>;
    case 3:  // Q4_1
      return dequantize_block_cuda<QK4_1, QR4_1, dequantize_q4_1>;
    case 6:  // Q5_0
      return dequantize_block_cuda<QK5_0, QR5_0, dequantize_q5_0>;
    case 7:  // Q5_1
      return dequantize_block_cuda<QK5_1, QR5_1, dequantize_q5_1>;
    case 8:  // Q8_0
      return dequantize_block_cuda<QK8_0, QR8_0, dequantize_q8_0>;
    case 10:  // Q2_K
      return dequantize_row_q2_K_cuda;
    case 11:  // Q3_K
      return dequantize_row_q3_K_cuda;
    case 12:  // Q4_K
      return dequantize_row_q4_K_cuda;
    case 13:  // Q5_K
      return dequantize_row_q5_K_cuda;
    case 14:  // Q6_K
      return dequantize_row_q6_K_cuda;
    case 16:  // IQ2_XXS
      return dequantize_row_iq2_xxs_cuda;
    case 17:  // IQ2_XS
      return dequantize_row_iq2_xs_cuda;
    case 18:  // IQ3_XXS
      return dequantize_row_iq3_xxs_cuda;
    case 19:  // IQ1_S
      return dequantize_row_iq1_s_cuda;
    case 20:  // IQ4_NL
      return dequantize_row_iq4_nl_cuda;
    case 21:  // IQ3_S
      return dequantize_row_iq3_s_cuda;
    case 22:  // IQ2_S
      return dequantize_row_iq2_s_cuda;
    case 23:  // IQ4_XS
      return dequantize_row_iq4_xs_cuda;
    case 29:  // IQ1_M
      return dequantize_row_iq1_m_cuda;
    default:
      // Unsupported type id — callers are responsible for handling nullptr.
      return nullptr;
  }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,836 @@
// Adapted from
// https://github.com/vllm-project/vllm/blob/755ed7b05be4743237d3339c4ff8c22bcaae04f4/csrc/quantization/gguf/gguf_kernel.cu
#include <c10/cuda/CUDAGuard.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/all.h>
// don't use clang-format here, it breaks the include order
// clang-format off
#include "utils.h"
#include "ggml-common.h"
#include "vecdotq.cuh"
#include "dequantize.cuh"
#include "mmvq.cuh"
#include "mmq.cuh"
#include "moe.cuh"
#include "moe_vec.cuh"
// clang-format off
// Q8 gemv
// Quantize one element of a row into q8_1 format. Each thread handles one
// input element; a warp-wide shuffle reduction computes the block's absmax
// (for the scale) and sum (stored alongside for later dot-product correction).
// NOTE(review): the 32-lane butterfly reduction only matches one q8_1 block if
// QK8_1 == 32 and kx_padded is a multiple of 32 (it is: rows are padded to a
// multiple of 512) — confirm QK8_1 == 32 in ggml-common.h.
template <typename scalar_t>
static __global__ void
quantize_q8_1(const scalar_t* __restrict__ x, void* __restrict__ vy, const int kx, const int kx_padded) {
  const auto ix = blockDim.x * blockIdx.x + threadIdx.x;
  if (ix >= kx_padded) {
    return;
  }
  const auto iy = blockDim.y * blockIdx.y + threadIdx.y;
  const int i_padded = iy * kx_padded + ix;
  block_q8_1* y = (block_q8_1*)vy;
  const int ib = i_padded / QK8_1;   // block index
  const int iqs = i_padded % QK8_1;  // quant index
  // Padding lanes contribute 0 so they don't disturb amax/sum.
  const float xi = ix < kx ? static_cast<float>(x[iy * kx + ix]) : 0.0f;
  float amax = fabsf(xi);
  float sum = xi;
#pragma unroll
  for (int mask = 16; mask > 0; mask >>= 1) {
    // Butterfly reduction across the full warp (mask = all 32 lanes).
    amax = fmaxf(amax, SGLANG_SHFL_XOR_SYNC_WIDTH(uint32_t(-1), amax, mask, 32));
    sum += SGLANG_SHFL_XOR_SYNC_WIDTH(uint32_t(-1), sum, mask, 32);
  }
  const float d = amax / 127;
  const int8_t q = amax == 0.0f ? 0 : roundf(xi / d);
  y[ib].qs[iqs] = q;
  if (iqs > 0) {
    return;
  }
  // Only the first lane of each block writes the per-block scale and sum.
  y[ib].ds.x = __float2half(d);
  y[ib].ds.y = __float2half(sum);
}
// Quantize ky rows of kx elements each into q8_1 blocks. Rows are zero-padded
// to a multiple of 512 elements; the destination is addressed in int32 units,
// 9 int32 words per 32 quantized values (32 bytes of quants + 4 bytes of ds).
// The grid's y extent is capped at 65535 blocks, so large ky is processed in
// slices.
template <typename scalar_t>
static void quantize_row_q8_1_cuda(const scalar_t* x, void* vy, const int kx, const int ky, cudaStream_t stream) {
  const int64_t kx_padded = (kx + 512 - 1) / 512 * 512;
  const int block_num_x = (kx_padded + CUDA_QUANTIZE_BLOCK_SIZE - 1) / CUDA_QUANTIZE_BLOCK_SIZE;
  constexpr int MAX_BLOCK_SIZE = 65535;
  for (int off = 0; off < ky; off += MAX_BLOCK_SIZE) {
    const int num_blocks_y = std::min(ky, off + MAX_BLOCK_SIZE) - off;
    const dim3 num_blocks(block_num_x, num_blocks_y, 1);
    // Bug fix: the launch must use the same block size that block_num_x was
    // computed with (CUDA_QUANTIZE_BLOCK_SIZE). The previous code launched
    // with CUDA_DEQUANTIZE_BLOCK_SIZE, which under- or over-covers kx_padded
    // whenever the two constants differ.
    const dim3 block_size(CUDA_QUANTIZE_BLOCK_SIZE, 1, 1);
    // Bug fix: widen off * kx to 64-bit before indexing — for large ky * kx
    // the 32-bit product can overflow.
    quantize_q8_1<<<num_blocks, block_size, 0, stream>>>(
        &x[(int64_t)off * kx], (int32_t*)vy + off * (kx_padded / 32 * 9), kx, kx_padded);
  }
}
// Dequantize a GGUF-quantized weight tensor W of logical shape [m, n] into a
// newly allocated dense tensor on W's device.
//
// W     : raw quantized data
// type  : GGUF quantization type id (see ggml_get_to_cuda)
// m, n  : logical output shape
// dtype : output dtype; defaults to float16 when not given
torch::Tensor ggml_dequantize(
    torch::Tensor W,  // quant weight
    int64_t type,
    int64_t m,
    int64_t n,
    std::optional<at::ScalarType> const& dtype) {
  const at::cuda::OptionalCUDAGuard device_guard(device_of(W));
  auto dtype_ = dtype.value_or(torch::kFloat16);
  auto options = torch::TensorOptions().dtype(dtype_).device(W.device());
  at::Tensor DW = torch::empty({m, n}, options);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
  DISPATCH_FLOAT_TYPES(DW.scalar_type(), "ggml_dequantize", [&] {
    auto to_cuda = ggml_get_to_cuda<scalar_t>(type);
    // Bug fix: ggml_get_to_cuda returns nullptr for unknown type ids; the
    // unchecked call would crash the process instead of raising a clear error.
    TORCH_CHECK(to_cuda != nullptr, "ggml_dequantize: unsupported GGUF quantization type ", type);
    to_cuda((void*)W.data_ptr(), (scalar_t*)DW.data_ptr(), m * n, stream);
  });
  return DW;
}
// Quantized GEMV: for each of the `vecs` input rows of X ([vecs, col]), the
// activations are quantized on the fly to q8_1 and multiplied against the
// GGUF-quantized weight W (`row` output rows), producing Y [vecs, row] in X's
// dtype on W's device. `type` selects the weight format (same ids as
// ggml_get_to_cuda).
torch::Tensor ggml_mul_mat_vec_a8(
    torch::Tensor W,  // quant weight
    torch::Tensor X,  // input
    int64_t type,
    int64_t row) {
  int col = X.sizes()[1];
  int vecs = X.sizes()[0];
  // Activations are padded to a multiple of 512; the q8_1 buffer needs
  // 9 int32 words per 32 elements (32 quant bytes + 4 bytes of scale/sum).
  const int padded = (col + 512 - 1) / 512 * 512;
  const at::cuda::OptionalCUDAGuard device_guard(device_of(X));
  auto options = torch::TensorOptions().dtype(X.dtype()).device(W.device());
  at::Tensor Y = torch::empty({vecs, row}, options);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
  options = torch::TensorOptions().dtype(torch::kInt32).device(W.device());
  at::Tensor quant_X = torch::empty({vecs, padded / 32 * 9}, options);
  DISPATCH_FLOAT_TYPES(X.scalar_type(), "ggml_mul_mat_vec_a8", [&] {
    quantize_row_q8_1_cuda<scalar_t>((scalar_t*)X.data_ptr(), (void*)quant_X.data_ptr(), col, vecs, stream);
    switch (type) {
      case 2:
        mul_mat_vec_q4_0_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 3:
        mul_mat_vec_q4_1_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 6:
        mul_mat_vec_q5_0_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 7:
        mul_mat_vec_q5_1_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 8:
        mul_mat_vec_q8_0_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 10:
        mul_mat_vec_q2_K_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 11:
        mul_mat_vec_q3_K_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 12:
        mul_mat_vec_q4_K_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 13:
        mul_mat_vec_q5_K_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 14:
        mul_mat_vec_q6_K_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 16:
        mul_mat_vec_iq2_xxs_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 17:
        mul_mat_vec_iq2_xs_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 18:
        mul_mat_vec_iq3_xxs_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 19:
        mul_mat_vec_iq1_s_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 20:
        mul_mat_vec_iq4_nl_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 21:
        mul_mat_vec_iq3_s_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 22:
        mul_mat_vec_iq2_s_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 23:
        mul_mat_vec_iq4_xs_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      case 29:
        mul_mat_vec_iq1_m_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, vecs, stream);
        break;
      default:
        // Bug fix: the switch previously had no default, so an unsupported
        // type id silently returned the uninitialized Y tensor.
        TORCH_CHECK(false, "ggml_mul_mat_vec_a8: unsupported GGUF quantization type ", type);
    }
  });
  return Y;
}
// Batched quantized matmul: Y[b, :] = dequant(W) @ X[b, :] for each batch row.
//
// W    : GGUF-quantized weight, format id `type`, producing `row` output rows
// X    : [batch, col] float activations (dtype handled by DISPATCH_FLOAT_TYPES)
// type : GGUF type id (2=q4_0 3=q4_1 6=q5_0 7=q5_1 8=q8_0
//        10=q2_K 11=q3_K 12=q4_K 13=q5_K 14=q6_K)
// row  : number of output rows (rows of W)
//
// X is first re-quantized row-wise to q8_1; the column count is padded up to a
// multiple of 512 so the kernels always consume whole q8_1 groups. Each
// 32-value group packs into 9 int32 words (32 int8 quants + one half2
// scale/sum pair = 36 bytes), hence the `padded / 32 * 9` buffer width.
torch::Tensor ggml_mul_mat_a8(
    torch::Tensor W,  // quant weight
    torch::Tensor X,  // input
    int64_t type,
    int64_t row) {
  int col = X.sizes()[1];
  int padded = (col + 512 - 1) / 512 * 512;
  int batch = X.sizes()[0];
  const at::cuda::OptionalCUDAGuard device_guard(device_of(X));
  auto options = torch::TensorOptions().dtype(X.dtype()).device(W.device());
  at::Tensor Y = torch::empty({batch, row}, options);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
  options = torch::TensorOptions().dtype(torch::kInt32).device(W.device());
  at::Tensor quant_X = torch::empty({batch, padded / 32 * 9}, options);
  DISPATCH_FLOAT_TYPES(X.scalar_type(), "ggml_mul_mat_a8", [&] {
    quantize_row_q8_1_cuda((scalar_t*)X.data_ptr(), (void*)quant_X.data_ptr(), col, batch, stream);
    switch (type) {
      case 2:
        ggml_mul_mat_q4_0_q8_1_cuda(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, batch, padded, row,
            stream);
        break;
      case 3:
        ggml_mul_mat_q4_1_q8_1_cuda(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, batch, padded, row,
            stream);
        break;
      case 6:
        ggml_mul_mat_q5_0_q8_1_cuda(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, batch, padded, row,
            stream);
        break;
      case 7:
        ggml_mul_mat_q5_1_q8_1_cuda(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, batch, padded, row,
            stream);
        break;
      case 8:
        ggml_mul_mat_q8_0_q8_1_cuda(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, batch, padded, row,
            stream);
        break;
      case 10:
        ggml_mul_mat_q2_K_q8_1_cuda(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, batch, padded, row,
            stream);
        break;
      case 11:
        ggml_mul_mat_q3_K_q8_1_cuda(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, batch, padded, row,
            stream);
        break;
      case 12:
        ggml_mul_mat_q4_K_q8_1_cuda(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, batch, padded, row,
            stream);
        break;
      case 13:
        ggml_mul_mat_q5_K_q8_1_cuda(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, batch, padded, row,
            stream);
        break;
      case 14:
        ggml_mul_mat_q6_K_q8_1_cuda(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), col, row, batch, padded, row,
            stream);
        break;
      default:
        // Previously an unrecognized type fell through and returned the
        // *uninitialized* Y buffer to the caller; fail loudly instead.
        TORCH_CHECK(false, "ggml_mul_mat_a8: unsupported GGUF type ", type);
    }
  });
  return Y;
}
// Fused MoE grouped matmul over GGUF-quantized expert weights (MMQ path).
//
// X                      : [tokens, col] float activations
// W                      : stacked expert weights in GGUF format `type`;
//                          W.stride(0) separates consecutive experts
// sorted_token_ids       : token slots grouped/padded by expert
// expert_ids             : expert index per block of sorted tokens
// num_tokens_post_padded : padded token count after alignment
// type                   : GGUF type id (2=q4_0 3=q4_1 6=q5_0 7=q5_1 8=q8_0
//                          10=q2_K 11=q3_K 12=q4_K 13=q5_K 14=q6_K)
// Returns Y : [tokens * top_k, row]
//
// X is quantized row-wise to q8_1 first; columns are padded to a multiple of
// 512, and each 32-value group packs into 9 int32 words (quants + half2
// scale/sum), hence the `padded / 32 * 9` buffer width.
torch::Tensor ggml_moe_a8(
    torch::Tensor X,  // input
    torch::Tensor W,  // expert weights
    torch::Tensor sorted_token_ids,
    torch::Tensor expert_ids,
    torch::Tensor num_tokens_post_padded,
    int64_t type,
    int64_t row,
    int64_t top_k,
    int64_t tokens) {
  int col = X.sizes()[1];
  int padded = (col + 512 - 1) / 512 * 512;
  const at::cuda::OptionalCUDAGuard device_guard(device_of(X));
  auto options = torch::TensorOptions().dtype(X.dtype()).device(W.device());
  at::Tensor Y = torch::empty({tokens * top_k, row}, options);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
  options = torch::TensorOptions().dtype(torch::kInt32).device(W.device());
  at::Tensor quant_X = torch::empty({tokens, padded / 32 * 9}, options);
  DISPATCH_FLOAT_TYPES(X.scalar_type(), "ggml_moe_a8", [&] {
    quantize_row_q8_1_cuda((scalar_t*)X.data_ptr(), (void*)quant_X.data_ptr(), col, tokens, stream);
    switch (type) {
      case 2:
        ggml_moe_q4_0_q8_1_cuda(
            (void*)quant_X.data_ptr(), (void*)W.data_ptr(), (scalar_t*)Y.data_ptr(),
            (int*)sorted_token_ids.data_ptr(), (int*)expert_ids.data_ptr(),
            (int*)num_tokens_post_padded.data_ptr(), W.stride(0), col, row, tokens, padded, row, top_k,
            sorted_token_ids.sizes()[0], stream);
        break;
      case 3:
        ggml_moe_q4_1_q8_1_cuda(
            (void*)quant_X.data_ptr(), (void*)W.data_ptr(), (scalar_t*)Y.data_ptr(),
            (int*)sorted_token_ids.data_ptr(), (int*)expert_ids.data_ptr(),
            (int*)num_tokens_post_padded.data_ptr(), W.stride(0), col, row, tokens, padded, row, top_k,
            sorted_token_ids.sizes()[0], stream);
        break;
      case 6:
        ggml_moe_q5_0_q8_1_cuda(
            (void*)quant_X.data_ptr(), (void*)W.data_ptr(), (scalar_t*)Y.data_ptr(),
            (int*)sorted_token_ids.data_ptr(), (int*)expert_ids.data_ptr(),
            (int*)num_tokens_post_padded.data_ptr(), W.stride(0), col, row, tokens, padded, row, top_k,
            sorted_token_ids.sizes()[0], stream);
        break;
      case 7:
        ggml_moe_q5_1_q8_1_cuda(
            (void*)quant_X.data_ptr(), (void*)W.data_ptr(), (scalar_t*)Y.data_ptr(),
            (int*)sorted_token_ids.data_ptr(), (int*)expert_ids.data_ptr(),
            (int*)num_tokens_post_padded.data_ptr(), W.stride(0), col, row, tokens, padded, row, top_k,
            sorted_token_ids.sizes()[0], stream);
        break;
      case 8:
        ggml_moe_q8_0_q8_1_cuda(
            (void*)quant_X.data_ptr(), (void*)W.data_ptr(), (scalar_t*)Y.data_ptr(),
            (int*)sorted_token_ids.data_ptr(), (int*)expert_ids.data_ptr(),
            (int*)num_tokens_post_padded.data_ptr(), W.stride(0), col, row, tokens, padded, row, top_k,
            sorted_token_ids.sizes()[0], stream);
        break;
      case 10:
        ggml_moe_q2_K_q8_1_cuda(
            (void*)quant_X.data_ptr(), (void*)W.data_ptr(), (scalar_t*)Y.data_ptr(),
            (int*)sorted_token_ids.data_ptr(), (int*)expert_ids.data_ptr(),
            (int*)num_tokens_post_padded.data_ptr(), W.stride(0), col, row, tokens, padded, row, top_k,
            sorted_token_ids.sizes()[0], stream);
        break;
      case 11:
        ggml_moe_q3_K_q8_1_cuda(
            (void*)quant_X.data_ptr(), (void*)W.data_ptr(), (scalar_t*)Y.data_ptr(),
            (int*)sorted_token_ids.data_ptr(), (int*)expert_ids.data_ptr(),
            (int*)num_tokens_post_padded.data_ptr(), W.stride(0), col, row, tokens, padded, row, top_k,
            sorted_token_ids.sizes()[0], stream);
        break;
      case 12:
        ggml_moe_q4_K_q8_1_cuda(
            (void*)quant_X.data_ptr(), (void*)W.data_ptr(), (scalar_t*)Y.data_ptr(),
            (int*)sorted_token_ids.data_ptr(), (int*)expert_ids.data_ptr(),
            (int*)num_tokens_post_padded.data_ptr(), W.stride(0), col, row, tokens, padded, row, top_k,
            sorted_token_ids.sizes()[0], stream);
        break;
      case 13:
        ggml_moe_q5_K_q8_1_cuda(
            (void*)quant_X.data_ptr(), (void*)W.data_ptr(), (scalar_t*)Y.data_ptr(),
            (int*)sorted_token_ids.data_ptr(), (int*)expert_ids.data_ptr(),
            (int*)num_tokens_post_padded.data_ptr(), W.stride(0), col, row, tokens, padded, row, top_k,
            sorted_token_ids.sizes()[0], stream);
        break;
      case 14:
        ggml_moe_q6_K_q8_1_cuda(
            (void*)quant_X.data_ptr(), (void*)W.data_ptr(), (scalar_t*)Y.data_ptr(),
            (int*)sorted_token_ids.data_ptr(), (int*)expert_ids.data_ptr(),
            (int*)num_tokens_post_padded.data_ptr(), W.stride(0), col, row, tokens, padded, row, top_k,
            sorted_token_ids.sizes()[0], stream);
        break;
      default:
        // An unrecognized type used to fall through and return the
        // *uninitialized* Y buffer; fail loudly instead.
        TORCH_CHECK(false, "ggml_moe_a8: unsupported GGUF type ", type);
    }
  });
  return Y;
}
// MoE matrix-vector path over GGUF-quantized expert weights (MMVQ path),
// used for small token counts; routes each token's top_k experts via topk_ids.
//
// X        : [tokens, col] float activations
// W        : stacked expert weights in GGUF format `type`
// topk_ids : [tokens, top_k] expert indices (int32)
// type     : GGUF type id (2=q4_0 3=q4_1 6=q5_0 7=q5_1 8=q8_0 10=q2_K 11=q3_K
//            12=q4_K 13=q5_K 14=q6_K 16=iq2_xxs 17=iq2_xs 18=iq3_xxs 19=iq1_s
//            20=iq4_nl 21=iq3_s 22=iq2_s 23=iq4_xs 29=iq1_m)
// Returns Y : [tokens * top_k, row]
//
// NOTE(review): Y is zero-initialized (unlike ggml_moe_a8's empty()) —
// presumably the vec kernels rely on that; confirm before changing to empty().
// X is quantized row-wise to q8_1 first; columns are padded to a multiple of
// 512, each 32-value group packing into 9 int32 words (quants + half2 scale).
torch::Tensor ggml_moe_a8_vec(
    torch::Tensor X,  // input
    torch::Tensor W,  // expert weights
    torch::Tensor topk_ids,
    int64_t top_k,
    int64_t type,
    int64_t row,
    int64_t tokens) {
  int col = X.sizes()[1];
  const int padded = (col + 512 - 1) / 512 * 512;
  const at::cuda::OptionalCUDAGuard device_guard(device_of(X));
  auto options = torch::TensorOptions().dtype(X.dtype()).device(W.device());
  at::Tensor Y = torch::zeros({tokens * top_k, row}, options);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
  options = torch::TensorOptions().dtype(torch::kInt32).device(W.device());
  at::Tensor quant_X = torch::empty({tokens, padded / 32 * 9}, options);
  DISPATCH_FLOAT_TYPES(X.scalar_type(), "ggml_moe_vec_a8", [&] {
    quantize_row_q8_1_cuda<scalar_t>((scalar_t*)X.data_ptr(), (void*)quant_X.data_ptr(), col, tokens, stream);
    switch (type) {
      case 2:
        moe_vec_q4_0_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 3:
        moe_vec_q4_1_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 6:
        moe_vec_q5_0_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 7:
        moe_vec_q5_1_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 8:
        moe_vec_q8_0_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 10:
        moe_vec_q2_K_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 11:
        moe_vec_q3_K_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 12:
        moe_vec_q4_K_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 13:
        moe_vec_q5_K_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 14:
        moe_vec_q6_K_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 16:
        moe_vec_iq2_xxs_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 17:
        moe_vec_iq2_xs_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 18:
        moe_vec_iq3_xxs_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 19:
        moe_vec_iq1_s_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 20:
        moe_vec_iq4_nl_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 21:
        moe_vec_iq3_s_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 22:
        moe_vec_iq2_s_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 23:
        moe_vec_iq4_xs_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      case 29:
        moe_vec_iq1_m_q8_1_cuda<scalar_t>(
            (void*)W.data_ptr(), (void*)quant_X.data_ptr(), (scalar_t*)Y.data_ptr(), (int*)topk_ids.data_ptr(),
            top_k, tokens, col, row, quant_X.stride(0), stream);
        break;
      default:
        // An unrecognized type used to silently return all-zeros output;
        // surface the configuration error instead.
        TORCH_CHECK(false, "ggml_moe_a8_vec: unsupported GGUF type ", type);
    }
  });
  return Y;
}
// Block-size constant (MOE_X_*) used by the fused MoE MMQ kernels for the
// given GGUF type id; returns 0 when the type has no fused MoE kernel.
int64_t ggml_moe_get_block_size(int64_t type) {
  if (type == 2) return MOE_X_Q4_0;
  if (type == 3) return MOE_X_Q4_1;
  if (type == 6) return MOE_X_Q5_0;
  if (type == 7) return MOE_X_Q5_1;
  if (type == 8) return MOE_X_Q8_0;
  if (type == 10) return MOE_X_Q2_K;
  if (type == 11) return MOE_X_Q3_K;
  if (type == 12) return MOE_X_Q4_K;
  if (type == 13) return MOE_X_Q5_K;
  if (type == 14) return MOE_X_Q6_K;
  return 0;  // unsupported type
}

View File

@@ -0,0 +1,881 @@
// copied from
// https://github.com/vllm-project/vllm/blob/4492e3a55428e161ca8db381edc28263e5da4c8d/csrc/quantization/gguf/mmq.cuh
// copied from https://github.com/ggerganov/llama.cpp/blob/b2899/ggml-cuda/mmq.cu
// Generic tiled matrix-multiplication core shared by all mul_mat_qX kernels:
// dst = x * y where x is GGUF-quantized (blocks of block_q_t) and y is
// q8_1-quantized; accumulation is fp32, results are stored as scalar_t.
//
// Template parameters:
//   qk, qr, qi      - quantization constants of the x format
//   need_sum        - if true, vec_dot consumes the full q8_1 half2 (scale,
//                     sum); if false, only the scale is needed and it is
//                     converted to f32 once while staging (see dsi_dst branch)
//   mmq_x, mmq_y    - output tile: mmq_x columns by mmq_y rows per block
//   nwarps          - blockDim.y (blockDim.x is WARP_SIZE_GGUF)
//   allocate_tiles, load_tiles, vec_dot - format-specific tile helpers
//
// Grid mapping: blockIdx.x selects the row tile of x/dst, blockIdx.y the
// column tile of y/dst. dst is written column-major: dst[col*nrows_dst+row].
template <
    typename scalar_t,
    int qk,
    int qr,
    int qi,
    bool need_sum,
    typename block_q_t,
    int mmq_x,
    int mmq_y,
    int nwarps,
    allocate_tiles_cuda_t allocate_tiles,
    load_tiles_cuda_t load_tiles,
    int vdr,
    vec_dot_q_mul_mat_cuda_t vec_dot>
static __device__ __forceinline__ void mul_mat_q(
    const void* __restrict__ vx,
    const void* __restrict__ vy,
    scalar_t* __restrict__ dst,
    const int ncols_x,
    const int nrows_x,
    const int ncols_y,
    const int nrows_y,
    const int nrows_dst) {
  const block_q_t* x = (const block_q_t*)vx;
  const block_q8_1* y = (const block_q8_1*)vy;
  const int blocks_per_row_x = ncols_x / qk;
  const int blocks_per_col_y = nrows_y / QK8_1;
  const int blocks_per_warp = WARP_SIZE_GGUF / qi;
  const int& ncols_dst = ncols_y;
  const auto row_dst_0 = blockIdx.x * mmq_y;
  const int& row_x_0 = row_dst_0;
  const auto col_dst_0 = blockIdx.y * mmq_x;
  const int& col_y_0 = col_dst_0;
  // Shared-memory tiles for x; the layout is chosen by the format-specific
  // allocator (some formats leave qh/sc unused).
  int* tile_x_ql = nullptr;
  half2* tile_x_dm = nullptr;
  int* tile_x_qh = nullptr;
  int* tile_x_sc = nullptr;
  allocate_tiles(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc);
  // Shared staging for the q8_1 tile of y: packed quants and per-group scales.
  __shared__ int tile_y_qs[mmq_x * WARP_SIZE_GGUF];
  __shared__ half2 tile_y_ds[mmq_x * WARP_SIZE_GGUF / QI8_1];
  // Per-thread fp32 accumulators for this thread's slice of the output tile.
  float sum[mmq_y / WARP_SIZE_GGUF][mmq_x / nwarps] = {{0.0f}};
  for (int ib0 = 0; ib0 < blocks_per_row_x; ib0 += blocks_per_warp) {
    // Stage one strip of x blocks into shared memory.
    // NOTE(review): nrows_x - row_x_0 - 1 looks like the bounds-clamp limit
    // used by the need_check loaders — confirm against load_tiles_* impls.
    load_tiles(
        x + row_x_0 * blocks_per_row_x + ib0,
        tile_x_ql,
        tile_x_dm,
        tile_x_qh,
        tile_x_sc,
        threadIdx.y,
        nrows_x - row_x_0 - 1,
        threadIdx.x,
        blocks_per_row_x);
#pragma unroll
    for (int ir = 0; ir < qr && ib0 + ir * blocks_per_warp / qr < blocks_per_row_x; ++ir) {
      const auto kqs = ir * WARP_SIZE_GGUF + threadIdx.x;
      const int kbxd = kqs / QI8_1;
      // Stage the q8_1 quants of y for this strip.
#pragma unroll
      for (int i = 0; i < mmq_x; i += nwarps) {
        const int col_y_eff = min(col_y_0 + threadIdx.y + i, ncols_y - 1);  // to prevent out-of-bounds memory accesses
        const block_q8_1* by0 = &y[col_y_eff * blocks_per_col_y + ib0 * (qk / QK8_1) + kbxd];
        const int index_y = (threadIdx.y + i) * WARP_SIZE_GGUF + kqs % WARP_SIZE_GGUF;
        tile_y_qs[index_y] = get_int_from_int8_aligned(by0->qs, threadIdx.x % QI8_1);
      }
      // Stage the q8_1 scales (and sums when need_sum) of y.
#pragma unroll
      for (int ids0 = 0; ids0 < mmq_x; ids0 += nwarps * QI8_1) {
        const int ids = (ids0 + threadIdx.y * QI8_1 + threadIdx.x / (WARP_SIZE_GGUF / QI8_1)) % mmq_x;
        const auto kby = threadIdx.x % (WARP_SIZE_GGUF / QI8_1);
        const int col_y_eff = min(col_y_0 + ids, ncols_y - 1);
        // if the sum is not needed it's faster to transform the scale to f32 ahead of time
        const half2* dsi_src =
            &y[col_y_eff * blocks_per_col_y + ib0 * (qk / QK8_1) + ir * (WARP_SIZE_GGUF / QI8_1) + kby].ds;
        half2* dsi_dst = &tile_y_ds[ids * (WARP_SIZE_GGUF / QI8_1) + kby];
        if (need_sum) {
          *dsi_dst = *dsi_src;
        } else {
          float* dfi_dst = (float*)dsi_dst;
          *dfi_dst = __low2float(*dsi_src);
        }
      }
      // Barrier: shared tiles fully staged before the dot-product pass reads them.
      __syncthreads();
      // #pragma unroll // unrolling this loop causes too much register pressure
      for (int k = ir * WARP_SIZE_GGUF / qr; k < (ir + 1) * WARP_SIZE_GGUF / qr; k += vdr) {
#pragma unroll
        for (int j = 0; j < mmq_x; j += nwarps) {
#pragma unroll
          for (int i = 0; i < mmq_y; i += WARP_SIZE_GGUF) {
            sum[i / WARP_SIZE_GGUF][j / nwarps] += vec_dot(
                tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, tile_y_qs, tile_y_ds, threadIdx.x + i, threadIdx.y + j, k);
          }
        }
      }
      // Barrier before the next iteration overwrites the shared y tile.
      __syncthreads();
    }
  }
  // Write back the accumulated tile, clamped to the dst bounds.
#pragma unroll
  for (int j = 0; j < mmq_x; j += nwarps) {
    const auto col_dst = col_dst_0 + j + threadIdx.y;
    if (col_dst >= ncols_dst) {
      return;
    }
#pragma unroll
    for (int i = 0; i < mmq_y; i += WARP_SIZE_GGUF) {
      const auto row_dst = row_dst_0 + threadIdx.x + i;
      if (row_dst >= nrows_dst) {
        continue;
      }
      dst[col_dst * nrows_dst + row_dst] = sum[i / WARP_SIZE_GGUF][j / nwarps];
    }
  }
}
#if defined(USE_ROCM)
#define MMQ_X_Q4_0 64
#define MMQ_Y_Q4_0 128
#define NWARPS_Q4_0 8
#else
#define MMQ_X_Q4_0 4
#define MMQ_Y_Q4_0 32
#define NWARPS_Q4_0 4
#endif

// Q4_0 instantiation of the generic tiled mul_mat_q kernel.
template <typename scalar_t, bool need_check>
static __global__ void
#if defined(USE_ROCM)
__launch_bounds__(WARP_SIZE_GGUF* NWARPS_Q4_0, 2)
#endif
    mul_mat_q4_0(
        const void* __restrict__ vx,
        const void* __restrict__ vy,
        scalar_t* __restrict__ dst,
        const int ncols_x,
        const int nrows_x,
        const int ncols_y,
        const int nrows_y,
        const int nrows_dst) {
  mul_mat_q<
      scalar_t,
      QK4_0,
      QR4_0,
      QI4_0,
      /*need_sum=*/true,
      block_q4_0,
      MMQ_X_Q4_0,
      MMQ_Y_Q4_0,
      NWARPS_Q4_0,
      allocate_tiles_q4_0<MMQ_Y_Q4_0>,
      load_tiles_q4_0<MMQ_Y_Q4_0, NWARPS_Q4_0, need_check>,
      VDR_Q4_0_Q8_1_MMQ,
      vec_dot_q4_0_q8_1_mul_mat>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}

// Host launcher: tiles dst into MMQ_Y x MMQ_X blocks; the need_check variant
// guards x-row reads when nrows_x is not a multiple of the tile height.
template <typename scalar_t>
static void ggml_mul_mat_q4_0_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols_x,
    const int nrows_x,
    const int ncols_y,
    const int nrows_y,
    const int nrows_dst,
    cudaStream_t stream) {
  constexpr int mmq_x = MMQ_X_Q4_0;
  constexpr int mmq_y = MMQ_Y_Q4_0;
  constexpr int nwarps = NWARPS_Q4_0;
  const dim3 grid_dim((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
  const dim3 block_dim(WARP_SIZE_GGUF, nwarps, 1);
  if (nrows_x % mmq_y == 0) {
    mul_mat_q4_0<scalar_t, /*need_check=*/false>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  } else {
    mul_mat_q4_0<scalar_t, /*need_check=*/true>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  }
}
#if defined(USE_ROCM)
#define MMQ_X_Q4_1 64
#define MMQ_Y_Q4_1 128
#define NWARPS_Q4_1 8
#else
#define MMQ_X_Q4_1 4
#define MMQ_Y_Q4_1 32
#define NWARPS_Q4_1 4
#endif

// Q4_1 instantiation of the generic tiled mul_mat_q kernel.
template <typename scalar_t, bool need_check>
static __global__ void
#if defined(USE_ROCM)
__launch_bounds__(WARP_SIZE_GGUF* NWARPS_Q4_1, 2)
#endif
    mul_mat_q4_1(
        const void* __restrict__ vx,
        const void* __restrict__ vy,
        scalar_t* __restrict__ dst,
        const int ncols_x,
        const int nrows_x,
        const int ncols_y,
        const int nrows_y,
        const int nrows_dst) {
  mul_mat_q<
      scalar_t,
      QK4_1,
      QR4_1,
      QI4_1,
      /*need_sum=*/true,
      block_q4_1,
      MMQ_X_Q4_1,
      MMQ_Y_Q4_1,
      NWARPS_Q4_1,
      allocate_tiles_q4_1<MMQ_Y_Q4_1>,
      load_tiles_q4_1<MMQ_Y_Q4_1, NWARPS_Q4_1, need_check>,
      VDR_Q4_1_Q8_1_MMQ,
      vec_dot_q4_1_q8_1_mul_mat>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}

// Host launcher: tiles dst into MMQ_Y x MMQ_X blocks; the need_check variant
// guards x-row reads when nrows_x is not a multiple of the tile height.
template <typename scalar_t>
static void ggml_mul_mat_q4_1_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols_x,
    const int nrows_x,
    const int ncols_y,
    const int nrows_y,
    const int nrows_dst,
    cudaStream_t stream) {
  constexpr int mmq_x = MMQ_X_Q4_1;
  constexpr int mmq_y = MMQ_Y_Q4_1;
  constexpr int nwarps = NWARPS_Q4_1;
  const dim3 grid_dim((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
  const dim3 block_dim(WARP_SIZE_GGUF, nwarps, 1);
  if (nrows_x % mmq_y == 0) {
    mul_mat_q4_1<scalar_t, /*need_check=*/false>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  } else {
    mul_mat_q4_1<scalar_t, /*need_check=*/true>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  }
}
#if defined(USE_ROCM)
#define MMQ_X_Q5_0 64
#define MMQ_Y_Q5_0 128
#define NWARPS_Q5_0 8
#else
#define MMQ_X_Q5_0 4
#define MMQ_Y_Q5_0 32
#define NWARPS_Q5_0 4
#endif

// Q5_0 instantiation of the generic tiled mul_mat_q kernel (scale-only q8_1).
template <typename scalar_t, bool need_check>
static __global__ void
#if defined(USE_ROCM)
__launch_bounds__(WARP_SIZE_GGUF* NWARPS_Q5_0, 2)
#endif
    mul_mat_q5_0(
        const void* __restrict__ vx,
        const void* __restrict__ vy,
        scalar_t* __restrict__ dst,
        const int ncols_x,
        const int nrows_x,
        const int ncols_y,
        const int nrows_y,
        const int nrows_dst) {
  mul_mat_q<
      scalar_t,
      QK5_0,
      QR5_0,
      QI5_0,
      /*need_sum=*/false,
      block_q5_0,
      MMQ_X_Q5_0,
      MMQ_Y_Q5_0,
      NWARPS_Q5_0,
      allocate_tiles_q5_0<MMQ_Y_Q5_0>,
      load_tiles_q5_0<MMQ_Y_Q5_0, NWARPS_Q5_0, need_check>,
      VDR_Q5_0_Q8_1_MMQ,
      vec_dot_q5_0_q8_1_mul_mat>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}

// Host launcher: tiles dst into MMQ_Y x MMQ_X blocks; the need_check variant
// guards x-row reads when nrows_x is not a multiple of the tile height.
template <typename scalar_t>
static void ggml_mul_mat_q5_0_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols_x,
    const int nrows_x,
    const int ncols_y,
    const int nrows_y,
    const int nrows_dst,
    cudaStream_t stream) {
  constexpr int mmq_x = MMQ_X_Q5_0;
  constexpr int mmq_y = MMQ_Y_Q5_0;
  constexpr int nwarps = NWARPS_Q5_0;
  const dim3 grid_dim((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
  const dim3 block_dim(WARP_SIZE_GGUF, nwarps, 1);
  if (nrows_x % mmq_y == 0) {
    mul_mat_q5_0<scalar_t, /*need_check=*/false>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  } else {
    mul_mat_q5_0<scalar_t, /*need_check=*/true>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  }
}
#if defined(USE_ROCM)
#define MMQ_X_Q5_1 64
#define MMQ_Y_Q5_1 128
#define NWARPS_Q5_1 8
#else
#define MMQ_X_Q5_1 4
#define MMQ_Y_Q5_1 32
#define NWARPS_Q5_1 4
#endif

// Q5_1 instantiation of the generic tiled mul_mat_q kernel.
template <typename scalar_t, bool need_check>
static __global__ void
#if defined(USE_ROCM)
__launch_bounds__(WARP_SIZE_GGUF* NWARPS_Q5_1, 2)
#endif
    mul_mat_q5_1(
        const void* __restrict__ vx,
        const void* __restrict__ vy,
        scalar_t* __restrict__ dst,
        const int ncols_x,
        const int nrows_x,
        const int ncols_y,
        const int nrows_y,
        const int nrows_dst) {
  mul_mat_q<
      scalar_t,
      QK5_1,
      QR5_1,
      QI5_1,
      /*need_sum=*/true,
      block_q5_1,
      MMQ_X_Q5_1,
      MMQ_Y_Q5_1,
      NWARPS_Q5_1,
      allocate_tiles_q5_1<MMQ_Y_Q5_1>,
      load_tiles_q5_1<MMQ_Y_Q5_1, NWARPS_Q5_1, need_check>,
      VDR_Q5_1_Q8_1_MMQ,
      vec_dot_q5_1_q8_1_mul_mat>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}

// Host launcher: tiles dst into MMQ_Y x MMQ_X blocks; the need_check variant
// guards x-row reads when nrows_x is not a multiple of the tile height.
template <typename scalar_t>
static void ggml_mul_mat_q5_1_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols_x,
    const int nrows_x,
    const int ncols_y,
    const int nrows_y,
    const int nrows_dst,
    cudaStream_t stream) {
  constexpr int mmq_x = MMQ_X_Q5_1;
  constexpr int mmq_y = MMQ_Y_Q5_1;
  constexpr int nwarps = NWARPS_Q5_1;
  const dim3 grid_dim((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
  const dim3 block_dim(WARP_SIZE_GGUF, nwarps, 1);
  if (nrows_x % mmq_y == 0) {
    mul_mat_q5_1<scalar_t, /*need_check=*/false>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  } else {
    mul_mat_q5_1<scalar_t, /*need_check=*/true>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  }
}
#if defined(USE_ROCM)
#define MMQ_X_Q8_0 64
#define MMQ_Y_Q8_0 128
#define NWARPS_Q8_0 8
#else
#define MMQ_X_Q8_0 4
#define MMQ_Y_Q8_0 32
#define NWARPS_Q8_0 4
#endif

// Q8_0 instantiation of the generic tiled mul_mat_q kernel (scale-only q8_1).
template <typename scalar_t, bool need_check>
static __global__ void
#if defined(USE_ROCM)
__launch_bounds__(WARP_SIZE_GGUF* NWARPS_Q8_0, 2)
#endif
    mul_mat_q8_0(
        const void* __restrict__ vx,
        const void* __restrict__ vy,
        scalar_t* __restrict__ dst,
        const int ncols_x,
        const int nrows_x,
        const int ncols_y,
        const int nrows_y,
        const int nrows_dst) {
  mul_mat_q<
      scalar_t,
      QK8_0,
      QR8_0,
      QI8_0,
      /*need_sum=*/false,
      block_q8_0,
      MMQ_X_Q8_0,
      MMQ_Y_Q8_0,
      NWARPS_Q8_0,
      allocate_tiles_q8_0<MMQ_Y_Q8_0>,
      load_tiles_q8_0<MMQ_Y_Q8_0, NWARPS_Q8_0, need_check>,
      VDR_Q8_0_Q8_1_MMQ,
      vec_dot_q8_0_q8_1_mul_mat>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}

// Host launcher: tiles dst into MMQ_Y x MMQ_X blocks; the need_check variant
// guards x-row reads when nrows_x is not a multiple of the tile height.
template <typename scalar_t>
static void ggml_mul_mat_q8_0_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols_x,
    const int nrows_x,
    const int ncols_y,
    const int nrows_y,
    const int nrows_dst,
    cudaStream_t stream) {
  constexpr int mmq_x = MMQ_X_Q8_0;
  constexpr int mmq_y = MMQ_Y_Q8_0;
  constexpr int nwarps = NWARPS_Q8_0;
  const dim3 grid_dim((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
  const dim3 block_dim(WARP_SIZE_GGUF, nwarps, 1);
  if (nrows_x % mmq_y == 0) {
    mul_mat_q8_0<scalar_t, /*need_check=*/false>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  } else {
    mul_mat_q8_0<scalar_t, /*need_check=*/true>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  }
}
#if defined(USE_ROCM)
#define MMQ_X_Q2_K 64
#define MMQ_Y_Q2_K 128
#define NWARPS_Q2_K 8
#else
#define MMQ_X_Q2_K 4
#define MMQ_Y_Q2_K 32
#define NWARPS_Q2_K 4
#endif

// Q2_K instantiation of the generic tiled mul_mat_q kernel (scale-only q8_1).
template <typename scalar_t, bool need_check>
static __global__ void
#if defined(USE_ROCM)
__launch_bounds__(WARP_SIZE_GGUF* NWARPS_Q2_K, 2)
#endif
    mul_mat_q2_K(
        const void* __restrict__ vx,
        const void* __restrict__ vy,
        scalar_t* __restrict__ dst,
        const int ncols_x,
        const int nrows_x,
        const int ncols_y,
        const int nrows_y,
        const int nrows_dst) {
  mul_mat_q<
      scalar_t,
      QK_K,
      QR2_K,
      QI2_K,
      /*need_sum=*/false,
      block_q2_K,
      MMQ_X_Q2_K,
      MMQ_Y_Q2_K,
      NWARPS_Q2_K,
      allocate_tiles_q2_K<MMQ_Y_Q2_K>,
      load_tiles_q2_K<MMQ_Y_Q2_K, NWARPS_Q2_K, need_check>,
      VDR_Q2_K_Q8_1_MMQ,
      vec_dot_q2_K_q8_1_mul_mat>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}

// Host launcher: tiles dst into MMQ_Y x MMQ_X blocks; the need_check variant
// guards x-row reads when nrows_x is not a multiple of the tile height.
template <typename scalar_t>
static void ggml_mul_mat_q2_K_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols_x,
    const int nrows_x,
    const int ncols_y,
    const int nrows_y,
    const int nrows_dst,
    cudaStream_t stream) {
  constexpr int mmq_x = MMQ_X_Q2_K;
  constexpr int mmq_y = MMQ_Y_Q2_K;
  constexpr int nwarps = NWARPS_Q2_K;
  const dim3 grid_dim((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
  const dim3 block_dim(WARP_SIZE_GGUF, nwarps, 1);
  if (nrows_x % mmq_y == 0) {
    mul_mat_q2_K<scalar_t, /*need_check=*/false>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  } else {
    mul_mat_q2_K<scalar_t, /*need_check=*/true>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  }
}
#if defined(USE_ROCM)
#define MMQ_X_Q3_K 64
#define MMQ_Y_Q3_K 128
#define NWARPS_Q3_K 8
#else
#define MMQ_X_Q3_K 4
#define MMQ_Y_Q3_K 32
#define NWARPS_Q3_K 4
#endif

// Q3_K instantiation of the generic tiled mul_mat_q kernel (scale-only q8_1).
template <typename scalar_t, bool need_check>
static __global__ void
#if defined(USE_ROCM)
__launch_bounds__(WARP_SIZE_GGUF* NWARPS_Q3_K, 2)
#endif
    mul_mat_q3_K(
        const void* __restrict__ vx,
        const void* __restrict__ vy,
        scalar_t* __restrict__ dst,
        const int ncols_x,
        const int nrows_x,
        const int ncols_y,
        const int nrows_y,
        const int nrows_dst) {
  mul_mat_q<
      scalar_t,
      QK_K,
      QR3_K,
      QI3_K,
      /*need_sum=*/false,
      block_q3_K,
      MMQ_X_Q3_K,
      MMQ_Y_Q3_K,
      NWARPS_Q3_K,
      allocate_tiles_q3_K<MMQ_Y_Q3_K>,
      load_tiles_q3_K<MMQ_Y_Q3_K, NWARPS_Q3_K, need_check>,
      VDR_Q3_K_Q8_1_MMQ,
      vec_dot_q3_K_q8_1_mul_mat>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}

// Host launcher: tiles dst into MMQ_Y x MMQ_X blocks; the need_check variant
// guards x-row reads when nrows_x is not a multiple of the tile height.
template <typename scalar_t>
static void ggml_mul_mat_q3_K_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols_x,
    const int nrows_x,
    const int ncols_y,
    const int nrows_y,
    const int nrows_dst,
    cudaStream_t stream) {
  constexpr int mmq_x = MMQ_X_Q3_K;
  constexpr int mmq_y = MMQ_Y_Q3_K;
  constexpr int nwarps = NWARPS_Q3_K;
  const dim3 grid_dim((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
  const dim3 block_dim(WARP_SIZE_GGUF, nwarps, 1);
  if (nrows_x % mmq_y == 0) {
    mul_mat_q3_K<scalar_t, /*need_check=*/false>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  } else {
    mul_mat_q3_K<scalar_t, /*need_check=*/true>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  }
}
#if defined(USE_ROCM)
#define MMQ_X_Q4_K 64
#define MMQ_Y_Q4_K 128
#define NWARPS_Q4_K 8
#else
#define MMQ_X_Q4_K 4
#define MMQ_Y_Q4_K 32
#define NWARPS_Q4_K 4
#endif

// Q4_K instantiation of the generic tiled mul_mat_q kernel.
template <typename scalar_t, bool need_check>
static __global__ void
#if defined(USE_ROCM)
__launch_bounds__(WARP_SIZE_GGUF* NWARPS_Q4_K, 2)
#endif
    mul_mat_q4_K(
        const void* __restrict__ vx,
        const void* __restrict__ vy,
        scalar_t* __restrict__ dst,
        const int ncols_x,
        const int nrows_x,
        const int ncols_y,
        const int nrows_y,
        const int nrows_dst) {
  mul_mat_q<
      scalar_t,
      QK_K,
      QR4_K,
      QI4_K,
      /*need_sum=*/true,
      block_q4_K,
      MMQ_X_Q4_K,
      MMQ_Y_Q4_K,
      NWARPS_Q4_K,
      allocate_tiles_q4_K<MMQ_Y_Q4_K>,
      load_tiles_q4_K<MMQ_Y_Q4_K, NWARPS_Q4_K, need_check>,
      VDR_Q4_K_Q8_1_MMQ,
      vec_dot_q4_K_q8_1_mul_mat>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}

// Host launcher: tiles dst into MMQ_Y x MMQ_X blocks; the need_check variant
// guards x-row reads when nrows_x is not a multiple of the tile height.
template <typename scalar_t>
static void ggml_mul_mat_q4_K_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols_x,
    const int nrows_x,
    const int ncols_y,
    const int nrows_y,
    const int nrows_dst,
    cudaStream_t stream) {
  constexpr int mmq_x = MMQ_X_Q4_K;
  constexpr int mmq_y = MMQ_Y_Q4_K;
  constexpr int nwarps = NWARPS_Q4_K;
  const dim3 grid_dim((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
  const dim3 block_dim(WARP_SIZE_GGUF, nwarps, 1);
  if (nrows_x % mmq_y == 0) {
    mul_mat_q4_K<scalar_t, /*need_check=*/false>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  } else {
    mul_mat_q4_K<scalar_t, /*need_check=*/true>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  }
}
#if defined(USE_ROCM)
#define MMQ_X_Q5_K 64
#define MMQ_Y_Q5_K 128
#define NWARPS_Q5_K 8
#else
#define MMQ_X_Q5_K 4
#define MMQ_Y_Q5_K 32
#define NWARPS_Q5_K 4
#endif

// Q5_K instantiation of the generic tiled mul_mat_q kernel.
template <typename scalar_t, bool need_check>
static __global__ void
#if defined(USE_ROCM)
__launch_bounds__(WARP_SIZE_GGUF* NWARPS_Q5_K, 2)
#endif
    mul_mat_q5_K(
        const void* __restrict__ vx,
        const void* __restrict__ vy,
        scalar_t* __restrict__ dst,
        const int ncols_x,
        const int nrows_x,
        const int ncols_y,
        const int nrows_y,
        const int nrows_dst) {
  mul_mat_q<
      scalar_t,
      QK_K,
      QR5_K,
      QI5_K,
      /*need_sum=*/true,
      block_q5_K,
      MMQ_X_Q5_K,
      MMQ_Y_Q5_K,
      NWARPS_Q5_K,
      allocate_tiles_q5_K<MMQ_Y_Q5_K>,
      load_tiles_q5_K<MMQ_Y_Q5_K, NWARPS_Q5_K, need_check>,
      VDR_Q5_K_Q8_1_MMQ,
      vec_dot_q5_K_q8_1_mul_mat>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}

// Host launcher: tiles dst into MMQ_Y x MMQ_X blocks; the need_check variant
// guards x-row reads when nrows_x is not a multiple of the tile height.
template <typename scalar_t>
static void ggml_mul_mat_q5_K_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols_x,
    const int nrows_x,
    const int ncols_y,
    const int nrows_y,
    const int nrows_dst,
    cudaStream_t stream) {
  constexpr int mmq_x = MMQ_X_Q5_K;
  constexpr int mmq_y = MMQ_Y_Q5_K;
  constexpr int nwarps = NWARPS_Q5_K;
  const dim3 grid_dim((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
  const dim3 block_dim(WARP_SIZE_GGUF, nwarps, 1);
  if (nrows_x % mmq_y == 0) {
    mul_mat_q5_K<scalar_t, /*need_check=*/false>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  } else {
    mul_mat_q5_K<scalar_t, /*need_check=*/true>
        <<<grid_dim, block_dim, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  }
}
#if defined(USE_ROCM)
#define MMQ_X_Q6_K 64
#define MMQ_Y_Q6_K 128
#define NWARPS_Q6_K 8
#else
#define MMQ_X_Q6_K 4
#define MMQ_Y_Q6_K 32
#define NWARPS_Q6_K 4
#endif
template <typename scalar_t, bool need_check>
static __global__ void
#if defined(USE_ROCM)
__launch_bounds__(WARP_SIZE_GGUF* NWARPS_Q6_K, 2)
#endif
mul_mat_q6_K(
const void* __restrict__ vx,
const void* __restrict__ vy,
scalar_t* __restrict__ dst,
const int ncols_x,
const int nrows_x,
const int ncols_y,
const int nrows_y,
const int nrows_dst) {
const int mmq_x = MMQ_X_Q6_K;
const int mmq_y = MMQ_Y_Q6_K;
const int nwarps = NWARPS_Q6_K;
mul_mat_q<
scalar_t,
QK_K,
QR6_K,
QI6_K,
false,
block_q6_K,
mmq_x,
mmq_y,
nwarps,
allocate_tiles_q6_K<mmq_y>,
load_tiles_q6_K<mmq_y, nwarps, need_check>,
VDR_Q6_K_Q8_1_MMQ,
vec_dot_q6_K_q8_1_mul_mat>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
// Launch the tiled Q6_K x q8_1 mat-mat kernel.
// Grid: x covers rows of x in MMQ_Y chunks, y covers columns of y in MMQ_X chunks.
template <typename scalar_t>
static void ggml_mul_mat_q6_K_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols_x,
    const int nrows_x,
    const int ncols_y,
    const int nrows_y,
    const int nrows_dst,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE_GGUF, NWARPS_Q6_K, 1);
  const dim3 blocks((nrows_x + MMQ_Y_Q6_K - 1) / MMQ_Y_Q6_K, (ncols_y + MMQ_X_Q6_K - 1) / MMQ_X_Q6_K, 1);
  if (nrows_x % MMQ_Y_Q6_K == 0) {
    // Every row tile is full, so the tile loader may skip its bounds check.
    mul_mat_q6_K<scalar_t, false>
        <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  } else {
    mul_mat_q6_K<scalar_t, true>
        <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
  }
}

View File

@@ -0,0 +1,352 @@
// copied from
// https://github.com/vllm-project/vllm/blob/4492e3a55428e161ca8db381edc28263e5da4c8d/csrc/quantization/gguf/mmvq.cuh
// copied and adapted from https://github.com/ggerganov/llama.cpp/blob/b2899/ggml-cuda/mmvq.cu
// Quantized mat-vec product: dst[vec, row] = dot(x[row, :], y[vec, :]).
// One warp computes one output row; blockDim.y rows per block.
// Grid: x = row-block index, y = input vector index.
// Template params: qk = quant block size, qi = ints per quant block,
// vdr = values processed per vec_dot call.
template <typename scalar_t, int qk, int qi, typename block_q_t, int vdr, vec_dot_q_cuda_t vec_dot_q_cuda>
static __global__ void mul_mat_vec_q(
    const void* __restrict__ vx,
    const void* __restrict__ vy,
    scalar_t* __restrict__ dst,
    const int ncols,
    const int nrows,
    const int nvecs) {
  const auto row = blockIdx.x * blockDim.y + threadIdx.y;
  const auto vec = blockIdx.y;
  if (row >= nrows || vec >= nvecs) {
    return;
  }
  const int blocks_per_row = ncols / qk;
  const int blocks_per_warp = vdr * WARP_SIZE / qi;
  // Row stride of y in elements, padded to a multiple of 512.
  // NOTE(review): assumes the q8_1 activation buffer was quantized with the
  // same 512-element padding — confirm against the quantize caller.
  const int nrows_y = (ncols + 512 - 1) / 512 * 512;
  // partial sum for each thread
  float tmp = 0.0f;
  const block_q_t* x = (const block_q_t*)vx;
  const block_q8_1* y = (const block_q8_1*)vy;
  // Lanes stride across the row's quant blocks; each vec_dot consumes vdr
  // consecutive int-packed quants.
  for (auto i = threadIdx.x / (qi / vdr); i < blocks_per_row; i += blocks_per_warp) {
    const int ibx = row * blocks_per_row + i;  // x block index
    const int iby = vec * (nrows_y / QK8_1) + i * (qk / QK8_1);  // y block index that aligns with ibx
    const int iqs = vdr * (threadIdx.x % (qi / vdr));  // x block quant index when casting the quants to int
    tmp += vec_dot_q_cuda(&x[ibx], &y[iby], iqs);
  }
  // sum up partial sums and write back result (warp butterfly reduction)
#pragma unroll
  for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
    tmp += SGLANG_SHFL_XOR_SYNC(uint32_t(-1), tmp, mask);
  }
  if (threadIdx.x == 0) {
    dst[vec * nrows + row] = tmp;
  }
}
// Launch the Q4_0 mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_q4_0_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the Q4_1 mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_q4_1_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
  const dim3 block_nums(block_num_y, nvecs, 1);
  const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  // Consistency fix: use QK4_1 for the q4_1 path (upstream passed QK4_0 here —
  // both expand to the same block size in ggml, but the constant should match
  // block_q4_1).
  mul_mat_vec_q<scalar_t, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1>
      <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the Q5_0 mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_q5_0_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the Q5_1 mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_q5_1_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the Q8_0 mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_q8_0_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the Q2_K mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_q2_K_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the Q3_K mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_q3_K_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the Q4_K mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_q4_K_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the Q5_K mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_q5_K_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the Q6_K mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_q6_K_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the IQ2_XXS mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_iq2_xxs_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK_K, QI2_XXS, block_iq2_xxs, 1, vec_dot_iq2_xxs_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the IQ2_XS mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_iq2_xs_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK_K, QI2_XS, block_iq2_xs, 1, vec_dot_iq2_xs_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the IQ2_S mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_iq2_s_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK_K, QI2_S, block_iq2_s, 1, vec_dot_iq2_s_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the IQ3_XXS mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_iq3_xxs_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK_K, QI3_XXS, block_iq3_xxs, 1, vec_dot_iq3_xxs_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the IQ1_S mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_iq1_s_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK_K, QI1_S, block_iq1_s, 1, vec_dot_iq1_s_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the IQ1_M mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_iq1_m_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK_K, QI1_M, block_iq1_m, 1, vec_dot_iq1_m_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the IQ4_NL mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_iq4_nl_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK4_NL, QI4_NL, block_iq4_nl, VDR_Q4_0_Q8_1_MMVQ, vec_dot_iq4_nl_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the IQ4_XS mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_iq4_xs_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK_K, QI4_XS, block_iq4_xs, 1, vec_dot_iq4_xs_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}
// Launch the IQ3_S mat-vec kernel: one warp per row, grid.y spans vectors.
template <typename scalar_t>
static void mul_mat_vec_iq3_s_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int ncols,
    const int nrows,
    const int nvecs,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, nvecs, 1);
  mul_mat_vec_q<scalar_t, QK_K, QI3_XS, block_iq3_s, 1, vec_dot_iq3_s_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, ncols, nrows, nvecs);
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,413 @@
// copied from
// https://github.com/vllm-project/vllm/blob/4492e3a55428e161ca8db381edc28263e5da4c8d/csrc/quantization/gguf/moe_vec.cuh
// copied and adapted from
// https://github.com/ggerganov/llama.cpp/blob/b2899/ggml-cuda/mmvq.cu
// MoE quantized mat-vec product: one warp computes one output row for one
// (token, expert-slot) pair. Grid: x = row-block index, z = token * topk + k;
// the expert weight matrix is selected via topk_ids[blockIdx.z].
// token_stride is the per-token stride of the q8_1 activation buffer in ints.
template <typename scalar_t, int qk, int qi, typename block_q_t, int vdr, vec_dot_q_cuda_t vec_dot_q_cuda>
static __global__ void moe_vec_q(
    const void* __restrict__ vx,
    const void* __restrict__ vy,
    scalar_t* __restrict__ dst,
    const int* topk_ids,
    const int topk,
    const int ncols,
    const int nrows,
    const int token_stride) {
  const auto row = blockIdx.x * blockDim.y + threadIdx.y;
  const auto token = blockIdx.z / topk;
  const auto expert = (topk_ids)[blockIdx.z];
  if (row >= nrows) {
    return;
  }
  const int blocks_per_row = ncols / qk;
  const int blocks_per_warp = vdr * WARP_SIZE / qi;
  // partial sum for each thread
  float tmp = 0.0f;
  // Base pointers: x offset to the selected expert's weight matrix, y offset
  // to this token's quantized activations.
  const block_q_t* x = ((const block_q_t*)vx) + expert * nrows * blocks_per_row;
  const block_q8_1* y = (const block_q8_1*)(((const int*)vy) + token * token_stride);
  for (auto i = threadIdx.x / (qi / vdr); i < blocks_per_row; i += blocks_per_warp) {
    const int ibx = row * blocks_per_row + i;  // x block index
    const int iby = i * (qk / QK8_1);  // y block index that aligns with ibx
    const int iqs = vdr * (threadIdx.x % (qi / vdr));  // x block quant index when casting the quants to int
    tmp += vec_dot_q_cuda(&x[ibx], &y[iby], iqs);
  }
  // sum up partial sums and write back result (warp butterfly reduction)
#pragma unroll
  for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
    tmp += SGLANG_SHFL_XOR_SYNC(uint32_t(-1), tmp, mask);
  }
  if (threadIdx.x == 0) {
    dst[blockIdx.z * nrows + row] = tmp;
  }
}
// Launch the Q4_0 MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_q4_0_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the Q4_1 MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_q4_1_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
  const dim3 block_nums(block_num_y, 1, tokens * top_k);
  const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  // Consistency fix: use QK4_1 for the q4_1 path (upstream passed QK4_0 here —
  // both expand to the same block size in ggml, but the constant should match
  // block_q4_1).
  moe_vec_q<scalar_t, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1>
      <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the Q5_0 MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_q5_0_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the Q5_1 MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_q5_1_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the Q8_0 MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_q8_0_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the Q2_K MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_q2_K_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the Q3_K MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_q3_K_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the Q4_K MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_q4_K_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the Q5_K MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_q5_K_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the Q6_K MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_q6_K_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the IQ2_XXS MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_iq2_xxs_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK_K, QI2_XXS, block_iq2_xxs, 1, vec_dot_iq2_xxs_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the IQ2_XS MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_iq2_xs_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK_K, QI2_XS, block_iq2_xs, 1, vec_dot_iq2_xs_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the IQ2_S MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_iq2_s_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK_K, QI2_S, block_iq2_s, 1, vec_dot_iq2_s_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the IQ3_XXS MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_iq3_xxs_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK_K, QI3_XXS, block_iq3_xxs, 1, vec_dot_iq3_xxs_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the IQ1_S MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_iq1_s_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK_K, QI1_S, block_iq1_s, 1, vec_dot_iq1_s_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the IQ1_M MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_iq1_m_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK_K, QI1_M, block_iq1_m, 1, vec_dot_iq1_m_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the IQ4_NL MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_iq4_nl_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK4_NL, QI4_NL, block_iq4_nl, VDR_Q4_0_Q8_1_MMVQ, vec_dot_iq4_nl_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the IQ4_XS MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_iq4_xs_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK_K, QI4_XS, block_iq4_xs, 1, vec_dot_iq4_xs_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}
// Launch the IQ3_S MoE mat-vec kernel; grid.z enumerates (token, expert-slot) pairs.
template <typename scalar_t>
static void moe_vec_iq3_s_q8_1_cuda(
    const void* vx,
    const void* vy,
    scalar_t* dst,
    const int* topk_ids,
    const int top_k,
    const int tokens,
    const int ncols,
    const int nrows,
    const int token_stride,
    cudaStream_t stream) {
  const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
  const dim3 blocks((nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1, tokens * top_k);
  moe_vec_q<scalar_t, QK_K, QI3_XS, block_iq3_s, 1, vec_dot_iq3_s_q8_1>
      <<<blocks, threads, 0, stream>>>(vx, vy, dst, topk_ids, top_k, ncols, nrows, token_stride);
}

File diff suppressed because it is too large Load Diff

View File

@@ -186,6 +186,32 @@ void fast_topk_transform_interface(
void gelu_quick(at::Tensor& out, const at::Tensor& input);
#endif
/*
 * From gguf quantization
 */
// Dequantize a GGUF-quantized weight W of logical shape (m, n); optional
// output dtype.
torch::Tensor
ggml_dequantize(torch::Tensor W, int64_t type, int64_t m, int64_t n, std::optional<at::ScalarType> const& dtype);
// Quantized mat-vec product of GGUF weight W against activations X;
// `row` is the number of weight rows.
torch::Tensor ggml_mul_mat_vec_a8(torch::Tensor W, torch::Tensor X, int64_t type, int64_t row);
// Quantized mat-mat product of GGUF weight W against activations X.
torch::Tensor ggml_mul_mat_a8(torch::Tensor W, torch::Tensor X, int64_t type, int64_t row);
// Grouped MoE matmul over expert-sorted tokens (sorted_token_ids / expert_ids /
// num_tokens_post_padded come from MoE block alignment).
torch::Tensor ggml_moe_a8(
    torch::Tensor X,
    torch::Tensor W,
    torch::Tensor sorted_token_ids,
    torch::Tensor expert_ids,
    torch::Tensor num_tokens_post_padded,
    int64_t type,
    int64_t row,
    int64_t top_k,
    int64_t tokens);
// Per-(token, expert-slot) mat-vec MoE variant; experts selected via topk_ids.
torch::Tensor ggml_moe_a8_vec(
    torch::Tensor X, torch::Tensor W, torch::Tensor topk_ids, int64_t top_k, int64_t type, int64_t row, int64_t tokens);
// Block size the MoE alignment should use for a given GGUF quant type.
int64_t ggml_moe_get_block_size(int64_t type);
/*
* From csrc/gemm
*/
@@ -306,6 +332,8 @@ void topk_softmax(
void moe_sum_reduce(at::Tensor& input, at::Tensor& output, double routed_scaling_factor);
void moe_sum(torch::Tensor& input, torch::Tensor& output);
std::vector<at::Tensor> moe_fused_gate(
at::Tensor& input,
at::Tensor& bias,

View File

@@ -19,6 +19,10 @@ limitations under the License.
#include <cuda_runtime.h>
#include <torch/all.h>
#ifdef USE_ROCM
#include <hip/hip_runtime.h>
#endif
#ifdef USE_ROCM
// Adapted from flashinfer-rocm [PR#491](https://github.com/flashinfer-ai/flashinfer/pull/491)
#define _DISPATCH_CASE_F16(c_type, ...) \
@@ -326,6 +330,13 @@ inline bool getEnvEnablePDL() {
#define DISPATCH_INTEGRAL_TYPES(TYPE, NAME, ...) \
AT_DISPATCH_SWITCH(TYPE, NAME, DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__))
// AT_DISPATCH case list covering the float types used by these kernels
// (fp32 / fp16 / bf16).
#define DISPATCH_CASE_FLOAT_TYPES(...)                   \
  AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__)   \
  AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__)    \
  AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__)
#define DISPATCH_FLOAT_TYPES(TYPE, NAME, ...) AT_DISPATCH_SWITCH(TYPE, NAME, DISPATCH_CASE_FLOAT_TYPES(__VA_ARGS__))
// Ceiling division; note y is evaluated twice, so avoid side-effecting args.
#define CEILDIV(x, y) (((x) + (y) - 1) / (y))
#ifndef USE_ROCM
@@ -447,3 +458,12 @@ inline uint32_t next_pow2(uint32_t x) noexcept {
if (x <= 1) return 1;
return 1u << (32 - __builtin_clz(x - 1));
}
/*
 * LDG Support
 */
// __ldg() loads through the read-only data cache on NVIDIA GPUs; ROCm has no
// direct equivalent here, so fall back to a plain dereference.
#ifndef USE_ROCM
#define SGLANG_LDG(arg) __ldg(arg)
#else
#define SGLANG_LDG(arg) *(arg)
#endif

View File

@@ -288,10 +288,19 @@ from sgl_kernel.moe import (
fp8_blockwise_scaled_grouped_mm,
moe_align_block_size,
moe_fused_gate,
moe_sum,
moe_sum_reduce,
prepare_moe_input,
topk_softmax,
)
from sgl_kernel.quantization import (
ggml_dequantize,
ggml_moe_a8,
ggml_moe_a8_vec,
ggml_moe_get_block_size,
ggml_mul_mat_a8,
ggml_mul_mat_vec_a8,
)
from sgl_kernel.sampling import (
min_p_sampling_from_probs,
top_k_mask_logits,

View File

@@ -48,6 +48,16 @@ def moe_sum_reduce(
)
def moe_sum(
    input_tensor: torch.Tensor,
    output_tensor: torch.Tensor,
):
    """Reduce MoE expert outputs into ``output_tensor`` (written in place).

    Thin wrapper over the ``sgl_kernel.moe_sum`` CUDA op; presumably sums
    ``input_tensor`` over its top-k/expert dimension — confirm against the
    kernel implementation.
    """
    torch.ops.sgl_kernel.moe_sum.default(
        input_tensor,
        output_tensor,
    )
def moe_fused_gate(
input_tensor,
bias,

View File

@@ -0,0 +1,8 @@
from .gguf import (
ggml_dequantize,
ggml_moe_a8,
ggml_moe_a8_vec,
ggml_moe_get_block_size,
ggml_mul_mat_a8,
ggml_mul_mat_vec_a8,
)

View File

@@ -0,0 +1,62 @@
import torch
def ggml_dequantize(
    weight: torch.Tensor, quant_type: int, M: int, N: int, dtype: torch.dtype
):
    """Dequantize a GGUF-quantized ``weight`` of logical shape (M, N).

    ``quant_type`` is the GGML quantization type id; ``dtype`` selects the
    output dtype. Delegates to the ``sgl_kernel.ggml_dequantize`` CUDA op.
    """
    assert M > 0 and N > 0, "GGUF weight Input shape must be of positive dimensions"
    return torch.ops.sgl_kernel.ggml_dequantize.default(weight, quant_type, M, N, dtype)
def ggml_mul_mat_vec_a8(
    weight: torch.Tensor, x: torch.Tensor, quant_type: int, row: int
) -> torch.Tensor:
    """Quantized mat-vec product: GGUF-quantized ``weight`` times ``x``.

    ``row`` is the number of weight rows. Delegates to the
    ``sgl_kernel.ggml_mul_mat_vec_a8`` CUDA op.
    """
    return torch.ops.sgl_kernel.ggml_mul_mat_vec_a8.default(weight, x, quant_type, row)
def ggml_mul_mat_a8(
    weight: torch.Tensor, x: torch.Tensor, quant_type: int, row: int
) -> torch.Tensor:
    """Quantized mat-mat product: GGUF-quantized ``weight`` times ``x``.

    ``row`` is the number of weight rows. Delegates to the
    ``sgl_kernel.ggml_mul_mat_a8`` CUDA op.
    """
    return torch.ops.sgl_kernel.ggml_mul_mat_a8.default(weight, x, quant_type, row)
def ggml_moe_a8(
    input: torch.Tensor,
    weight: torch.Tensor,
    sorted_token_ids: torch.Tensor,
    expert_ids: torch.Tensor,
    num_token_post_padded: torch.Tensor,
    type: int,
    row: int,
    topk: int,
    tokens: int,
) -> torch.Tensor:
    """Grouped GGUF-quantized MoE matmul over expert-sorted tokens.

    ``sorted_token_ids`` / ``expert_ids`` / ``num_token_post_padded`` are the
    MoE block-alignment outputs; delegates to the ``sgl_kernel.ggml_moe_a8``
    CUDA op.
    """
    op = torch.ops.sgl_kernel.ggml_moe_a8.default
    return op(
        input,
        weight,
        sorted_token_ids,
        expert_ids,
        num_token_post_padded,
        type,
        row,
        topk,
        tokens,
    )
def ggml_moe_a8_vec(
    input: torch.Tensor,
    weight: torch.Tensor,
    topk_ids: torch.Tensor,
    top_k: int,
    type: int,
    row: int,
    tokens: int,
) -> torch.Tensor:
    """Per-token mat-vec variant of the GGUF MoE matmul.

    Experts are selected via ``topk_ids``; delegates to the
    ``sgl_kernel.ggml_moe_a8_vec`` CUDA op.
    """
    op = torch.ops.sgl_kernel.ggml_moe_a8_vec.default
    return op(input, weight, topk_ids, top_k, type, row, tokens)
def ggml_moe_get_block_size(type: int) -> int:
    """Return the MoE-alignment block size for the given GGUF quant ``type``.

    Delegates to the ``sgl_kernel.ggml_moe_get_block_size`` op.
    """
    return torch.ops.sgl_kernel.ggml_moe_get_block_size.default(type)

View File

@@ -0,0 +1,160 @@
# SPDX-License-Identifier: Apache-2.0
import random
from pathlib import Path
import numpy as np
import pytest
import torch
from gguf import GGMLQuantizationType, GGUFReader, ReaderTensor, dequantize
from huggingface_hub import snapshot_download
from sgl_kernel import (
ggml_dequantize,
ggml_moe_a8,
ggml_moe_a8_vec,
ggml_moe_get_block_size,
ggml_mul_mat_a8,
ggml_mul_mat_vec_a8,
)
# Download (and cache) the GGUF sample checkpoints used as test fixtures.
GGUF_SAMPLE = snapshot_download("Isotr0py/test-gguf-sample")
GGUF_SAMPLE_MOE = snapshot_download("SzymonOzog/test-gguf-moe-sample")
def get_gguf_sample_tensors(
    hidden_size: int, quant_type: GGMLQuantizationType
) -> list[ReaderTensor]:
    """Read the sample tensors for one (quant type, hidden size) fixture file."""
    sample_file = Path(GGUF_SAMPLE) / f"Quant_{quant_type.name}_{hidden_size}.gguf"
    return GGUFReader(sample_file).tensors
def get_gguf_MoE_tensors(
    hidden_size: int, quant_type: GGMLQuantizationType
) -> list[ReaderTensor]:
    """Read the MoE sample tensors for one (quant type, hidden size) fixture file."""
    sample_file = Path(GGUF_SAMPLE_MOE) / f"Quant_{quant_type.name}_{hidden_size}.gguf"
    return GGUFReader(sample_file).tensors
# Test parameter grids. Only bf16 by default to keep runtime down; widen the
# commented list locally when needed.
DTYPES = [torch.bfloat16]  # [torch.half, torch.bfloat16, torch.float32]
# Hidden_size for testing, must match the sample file in HF repo,
# we have `hidden_size = 256, 1024` for test in HF repo currently.
HIDDEN_SIZES = [256, 1024]
NUM_TOKENS = [7, 2050]  # Arbitrary values for testing
SEEDS = [0]
# All GGUF quantization types exercised by the dequantize / mat-vec tests.
QUANT_TYPES = [
    # i-matrix
    GGMLQuantizationType.IQ1_M,
    GGMLQuantizationType.IQ1_S,
    GGMLQuantizationType.IQ2_S,
    GGMLQuantizationType.IQ2_XS,
    GGMLQuantizationType.IQ3_S,
    GGMLQuantizationType.IQ3_XXS,
    GGMLQuantizationType.IQ4_NL,
    GGMLQuantizationType.IQ4_XS,
    # k-quants
    GGMLQuantizationType.Q2_K,
    GGMLQuantizationType.Q3_K,
    GGMLQuantizationType.Q4_K,
    GGMLQuantizationType.Q5_K,
    GGMLQuantizationType.Q6_K,
    # standard quantization
    GGMLQuantizationType.Q4_0,
    GGMLQuantizationType.Q5_0,
    GGMLQuantizationType.Q8_0,
]
@pytest.mark.parametrize("hidden_size", HIDDEN_SIZES)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("quant_type", QUANT_TYPES)
@torch.inference_mode()
def test_dequantize(
    hidden_size: int, dtype: torch.dtype, quant_type: GGMLQuantizationType
):
    """ggml_dequantize must agree with the reference gguf.dequantize."""
    for tensor in get_gguf_sample_tensors(hidden_size, quant_type):
        # Tensor names encode their shape as a trailing "<d0>x<d1>..." suffix.
        dims = [int(d) for d in tensor.name.split("_")[-1].split("x")]
        expected = torch.tensor(
            dequantize(tensor.data, quant_type), device="cuda"
        ).to(dtype)
        actual = ggml_dequantize(
            torch.tensor(tensor.data, device="cuda"), quant_type, *dims, dtype
        )
        torch.testing.assert_close(actual, expected, atol=1e-2, rtol=4e-2)
@pytest.mark.parametrize("hidden_size", HIDDEN_SIZES)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("quant_type", QUANT_TYPES)
@torch.inference_mode()
def test_mmvq(hidden_size: int, dtype: torch.dtype, quant_type: GGMLQuantizationType):
    """Quantized mat-vec (ggml_mul_mat_vec_a8) vs. dequantized ``x @ W.T``."""
    activations = torch.rand((1, hidden_size), dtype=dtype, device="cuda")
    for tensor in get_gguf_sample_tensors(hidden_size, quant_type):
        dense_weight = torch.tensor(
            dequantize(tensor.data, quant_type), device="cuda"
        ).to(dtype)
        expected = activations @ dense_weight.T
        qweight = torch.tensor(tensor.data, device="cuda")
        actual = ggml_mul_mat_vec_a8(
            qweight, activations, quant_type, qweight.shape[0]
        ).to(dtype)
        # Loose tolerance: low-bit quantization error dominates the comparison.
        torch.testing.assert_close(actual, expected, atol=1, rtol=1e-1)
@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("hidden_size", HIDDEN_SIZES)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize(
    "quant_type",
    [
        # k-quants
        GGMLQuantizationType.Q2_K,
        GGMLQuantizationType.Q3_K,
        GGMLQuantizationType.Q4_K,
        GGMLQuantizationType.Q5_K,
        GGMLQuantizationType.Q6_K,
        # standard quants
        GGMLQuantizationType.Q4_0,
        GGMLQuantizationType.Q5_0,
        GGMLQuantizationType.Q8_0,
    ],
)
@torch.inference_mode()
def test_mmq(
    num_tokens: int,
    hidden_size: int,
    dtype: torch.dtype,
    quant_type: GGMLQuantizationType,
):
    """Quantized matmul (ggml_mul_mat_a8) vs. dequantized ``x @ W.T``."""
    # Per-dtype tolerances; keyed by dtype because quantization plus low-
    # precision accumulation widens the error band differently per type.
    ATOL = {torch.half: 1, torch.bfloat16: 1.5, torch.float: 1.2}
    # test matrix has inputs centered around 0 and lower precision from
    # bfloat16 tends to accumulate and can greatly inflate rtol
    # since outputs are also very close to 0
    RTOL = {torch.half: 1e-1, torch.bfloat16: 1e4, torch.float: 2e1}

    activations = torch.rand((num_tokens, hidden_size), dtype=dtype, device="cuda")
    for tensor in get_gguf_sample_tensors(hidden_size, quant_type):
        dense_weight = torch.tensor(
            dequantize(tensor.data, quant_type), device="cuda"
        ).to(dtype)
        expected = activations @ dense_weight.T
        qweight = torch.tensor(tensor.data, device="cuda")
        actual = ggml_mul_mat_a8(qweight, activations, quant_type, qweight.shape[0])
        torch.testing.assert_close(
            actual, expected, atol=ATOL[dtype], rtol=RTOL[dtype]
        )
# Allow running this file directly (outside a pytest invocation).
if __name__ == "__main__":
    pytest.main([__file__])

View File

@@ -4,7 +4,14 @@ import pytest
import torch
import triton
import triton.language as tl
from sgl_kernel import moe_align_block_size
from sgl_kernel import moe_align_block_size, moe_sum
def is_hip() -> bool:
    """Report whether this PyTorch build targets AMD ROCm (HIP)."""
    hip_version = torch.version.hip
    return hip_version is not None
_is_hip = is_hip()
def ceil_div(a, b):
@@ -246,5 +253,20 @@ def test_moe_align_block_size_compare_implementations(
)
@pytest.mark.parametrize("m", [1, 33, 64, 222])
@pytest.mark.parametrize("topk", [2, 6])
@pytest.mark.parametrize("k", [128, 511, 1024])
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.skipif(_is_hip, reason="Skip for AMD GPU")
def test_moe_sum(m: int, topk: int, k: int, dtype: torch.dtype):
    """moe_sum must equal summing the per-expert outputs along dim 1."""
    x = torch.randn((m, topk, k), device="cuda", dtype=dtype)
    out = torch.empty((m, k), device="cuda", dtype=dtype)
    reference = x.sum(dim=1)
    moe_sum(x, out)
    torch.testing.assert_close(out, reference, atol=2e-2, rtol=0)
# Allow running this file directly (outside a pytest invocation).
if __name__ == "__main__":
    pytest.main([__file__])