* oai moe * compat with new checkpoint * add attn sink impl * add rope scaling yarn * logits match with latest transformers code * wip chat template * rm trailing space * use ggml_scale_bias * rm redundant is_swa_all * convert interleaved gate_up * graph : fix activation function to match reference (#7) * vocab : handle o200k_harmony special tokens * ggml : add attention sinks support (#1) * llama : add attn sinks * ggml : add attn sinks * cuda : add attn sinks * vulkan : add support for sinks in softmax remove unnecessary return * ggml : add fused swiglu_oai op (#11) * ggml : add fused swiglu_oai op * Update ggml/src/ggml-cpu/ops.cpp Co-authored-by: Georgi Gerganov <ggerganov@gmail.com> * update CUDA impl * cont : metal impl * add vulkan impl * test-backend-ops : more test cases, clean up * llama : remove unfused impl * remove extra lines --------- Co-authored-by: Georgi Gerganov <ggerganov@gmail.com> --------- Co-authored-by: slaren <slarengh@gmail.com> * repack mxfp4 upon conversion * clean up a bit * enable thinking * add quick hack to render only some special tokens * fix bf16 conversion * remove vocab hack * webui ok * support chat parsing for gpt-oss * fix webui * direct mapping mxfp4, FINALLY * force using mxfp4 * properly use lazy tensor * ggml : add mxfp4 ggml : use e8m0 conversion instead of powf Co-authored-by: Diego Devesa <slarengh@gmail.com> change kvalues_mxfp4 table to match e2m1 (#6) metal : remove quantization for now (not used) cuda : fix disabled CUDA graphs due to ffn moe bias vulkan : add support for mxfp4 cont : add cm2 dequant * ggml : add ggml_add_id (#13) * ggml : add ggml_add_id * add cuda impl * llama : add weight support check for add_id * perf opt * add vulkan impl * rename cuda files * add metal impl * allow in-place ggml_add_id * llama : keep biases on CPU with --cpu-moe * llama : fix compile error ggml-ci * cuda : add fallback for __nv_cvt_e8m0_to_bf16raw ggml-ci * cleanup ggml-ci * sycl : fix supports_op for MXFP4 ggml-ci * fix 
Unknown reasoning format * ggml-cpu : fix AVX build ggml-ci * fix hip build ggml-ci * cuda : add mxfp4 dequantization support for cuBLAS ggml-ci * ggml-cpu : fix mxfp4 fallback definitions for some architectures ggml-ci * cuda : fix version required for __nv_cvt_e8m0_to_bf16raw --------- Co-authored-by: Xuan Son Nguyen <son@huggingface.co> Co-authored-by: slaren <slarengh@gmail.com>
106 lines
3.7 KiB
Plaintext
106 lines
3.7 KiB
Plaintext
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
|
|
#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
|
|
#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require
|
|
|
|
#include "types.comp"
|
|
|
|
// Each iqs value maps to a 32-bit integer
|
|
|
|
#if defined(DATA_A_Q4_0)
|
|
// Unpack four q4_0 nibble pairs into two 32-bit lanes of 4-bit values.
// A q4_0 block is 18 bytes (not 4-byte aligned), so the quants are
// fetched as two 16-bit halves and fused into a single 32-bit word.
i32vec2 repack(uint ib, uint iqs) {
    const uint base = iqs * 2;
    const uint32_t packed_q = pack32(u16vec2(data_a[ib].qs[base    ],
                                             data_a[ib].qs[base + 1]));
    const int32_t lo = int32_t( packed_q       & 0x0F0F0F0F);
    const int32_t hi = int32_t((packed_q >> 4) & 0x0F0F0F0F);
    return i32vec2(lo, hi);
}
|
|
|
|
// Combine the integer dot product with the q4_0 scale (da) and the
// q8_1 scale/sum pair (dsb); the 8*dsb.y term removes the q4_0
// zero-point bias from the accumulated sum.
ACC_TYPE mul_q8_1(int32_t q_sum, float da, vec2 dsb) {
    const float scaled = float(q_sum) * dsb.x;
    const float bias   = 8.0f * dsb.y;
    return ACC_TYPE(da * (scaled - bias));
}
|
|
#endif
|
|
|
|
#if defined(DATA_A_Q4_1)
|
|
// Unpack four q4_1 nibble pairs into two 32-bit lanes of 4-bit values.
// A q4_1 block is 20 bytes, so a single aligned 32-bit load suffices.
i32vec2 repack(uint ib, uint iqs) {
    const uint32_t w = data_a_packed32[ib].qs[iqs];
    const int32_t lo = int32_t( w       & 0x0F0F0F0F);
    const int32_t hi = int32_t((w >> 4) & 0x0F0F0F0F);
    return i32vec2(lo, hi);
}
|
|
|
|
// Combine the integer dot product with the q4_1 scale/min pair (dma)
// and the q8_1 scale/sum pair (dsb); dma.y * dsb.y folds the block
// minimum into the result.
ACC_TYPE mul_q8_1(int32_t q_sum, vec2 dma, vec2 dsb) {
    const float prod   = float(q_sum) * dma.x * dsb.x;
    const float offset = dma.y * dsb.y;
    return ACC_TYPE(prod + offset);
}
|
|
#endif
|
|
|
|
#if defined(DATA_A_Q5_0)
|
|
// Unpack four q5_0 quants into two 32-bit lanes of 5-bit values.
// A q5_0 block is 22 bytes (not 4-byte aligned), so the low nibbles are
// fetched as two 16-bit halves; the fifth bits come from qh.
i32vec2 repack(uint ib, uint iqs) {
    const uint base = iqs * 2;
    const uint32_t vui = pack32(u16vec2(data_a[ib].qs[base    ],
                                        data_a[ib].qs[base + 1]));
    // Assemble the 32 high bits and shift the group for this lane pair down.
    const int32_t qh = int32_t((uint32_t(data_a[ib].qh[1]) << 16 | data_a[ib].qh[0]) >> (4 * iqs));
    // Multiplying a 4-bit group by 0x02040810 moves bit k to position 8*k+4,
    // i.e. into bit 4 of byte k; the 0x10101010 mask isolates those bits.
    const int32_t hi0 = ((qh        & 0xF) * 0x02040810) & 0x10101010; // (0,1,2,3)     -> (4,12,20,28)
    const int32_t hi1 = (((qh >> 16) & 0xF) * 0x02040810) & 0x10101010; // (16,17,18,19) -> (4,12,20,28)

    const int32_t v0 = int32_t( vui       & 0x0F0F0F0F) | hi0;
    const int32_t v1 = int32_t((vui >> 4) & 0x0F0F0F0F) | hi1;
    return i32vec2(v0, v1);
}
|
|
|
|
// Combine the integer dot product with the q5_0 scale (da) and the
// q8_1 scale/sum pair (dsb); the 16*dsb.y term removes the q5_0
// zero-point bias from the accumulated sum.
ACC_TYPE mul_q8_1(int32_t q_sum, float da, vec2 dsb) {
    const float scaled = float(q_sum) * dsb.x;
    const float bias   = 16.0f * dsb.y;
    return ACC_TYPE(da * (scaled - bias));
}
|
|
#endif
|
|
|
|
#if defined(DATA_A_Q5_1)
|
|
// Unpack four q5_1 quants into two 32-bit lanes of 5-bit values.
// A q5_1 block is 24 bytes, so aligned 32-bit loads suffice for both
// the low nibbles and the packed fifth bits.
i32vec2 repack(uint ib, uint iqs) {
    const uint32_t vui = data_a_packed32[ib].qs[iqs];
    const int32_t qh = int32_t(data_a_packed32[ib].qh >> (4 * iqs));
    // Multiplying a 4-bit group by 0x02040810 moves bit k to position 8*k+4,
    // i.e. into bit 4 of byte k; the 0x10101010 mask isolates those bits.
    const int32_t hi0 = ((qh        & 0xF) * 0x02040810) & 0x10101010; // (0,1,2,3)     -> (4,12,20,28)
    const int32_t hi1 = (((qh >> 16) & 0xF) * 0x02040810) & 0x10101010; // (16,17,18,19) -> (4,12,20,28)

    const int32_t v0 = int32_t( vui       & 0x0F0F0F0F) | hi0;
    const int32_t v1 = int32_t((vui >> 4) & 0x0F0F0F0F) | hi1;
    return i32vec2(v0, v1);
}
|
|
|
|
// Combine the integer dot product with the q5_1 scale/min pair (dma)
// and the q8_1 scale/sum pair (dsb); dma.y * dsb.y folds the block
// minimum into the result.
ACC_TYPE mul_q8_1(int32_t q_sum, vec2 dma, vec2 dsb) {
    const float prod   = float(q_sum) * dma.x * dsb.x;
    const float offset = dma.y * dsb.y;
    return ACC_TYPE(prod + offset);
}
|
|
#endif
|
|
|
|
#if defined(DATA_A_Q8_0)
|
|
// Pack two consecutive q8_0 quants into one 32-bit word.
// A q8_0 block is 34 bytes (not 4-byte aligned), so the quants are
// fetched individually and fused via a 16-bit vector pack.
int32_t repack(uint ib, uint iqs) {
    const uint base = iqs * 2;
    const i16vec2 pair = i16vec2(data_a[ib].qs[base    ],
                                 data_a[ib].qs[base + 1]);
    return pack32(pair);
}
|
|
|
|
// Scale the integer dot product by the q8_0 scale (da) and the q8_1
// scale (dsb.x); q8_0 has no zero-point, so no bias term is needed.
ACC_TYPE mul_q8_1(int32_t q_sum, float da, vec2 dsb) {
    const float scaled = float(q_sum) * da;
    return ACC_TYPE(scaled * dsb.x);
}
|
|
#endif
|
|
|
|
#if defined(DATA_A_Q4_0) || defined(DATA_A_Q5_0) || defined(DATA_A_Q8_0) || defined(DATA_A_IQ1_S) || defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_XS) || defined(DATA_A_IQ4_NL)
|
|
// Fetch the per-block scale factor d for formats that store a single
// scale and no minimum.
FLOAT_TYPE get_d(uint ib) {
    const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib].d);
    return d;
}
|
|
#endif
|
|
|
|
#if defined(DATA_A_MXFP4)
|
|
// Fetch the per-block scale for MXFP4, decoding the stored E8M0
// exponent byte into a float.
FLOAT_TYPE get_d(uint ib) {
    const float d = e8m0_to_fp32(data_a[ib].e);
    return FLOAT_TYPE(d);
}
|
|
#endif
|
|
|
|
#if defined(DATA_A_Q4_1) || defined(DATA_A_Q5_1)
|
|
// Fetch the per-block (scale, min) pair for formats that store both.
FLOAT_TYPE_VEC2 get_dm(uint ib) {
    const FLOAT_TYPE_VEC2 dm = FLOAT_TYPE_VEC2(data_a_packed32[ib].dm);
    return dm;
}
|
|
#endif
|