[Feature] support compressed-tensors w4a16 quantization (#154)
- Native INT4 Kimi model inference is supported.

Signed-off-by: Li Wei <liwei.109@outlook.com>
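For context, a minimal offline-serving sketch of how a compressed-tensors w4a16 checkpoint is typically loaded through vLLM; the model path, prompt, and sampling settings are placeholders chosen for illustration, not part of this commit:

    from vllm import LLM, SamplingParams

    # The quantization scheme is normally picked up from the checkpoint's own
    # config; passing quantization="compressed-tensors" only makes it explicit.
    llm = LLM(model="/path/to/w4a16-quantized-model",
              quantization="compressed-tensors")
    outputs = llm.generate(["Hello"], SamplingParams(max_tokens=32))
    print(outputs[0].outputs[0].text)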
@@ -0,0 +1,57 @@
+#
+# Copyright (c) 2026 Baidu, Inc. All Rights Reserved.
+# Author: Li Wei
+# Email: liwei157@baidu.com
+# This file is a part of the vllm-kunlun project.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional
+
+import torch
+import xspeedgate_ops
+from vllm.model_executor.layers.quantization.kernels.mixed_precision import (
+    ExllamaLinearKernel,
+    _POSSIBLE_KERNELS,
+)
+
+
+class KunlunExllamaLinearKernel(ExllamaLinearKernel):
+
+    def apply_weights(
+        self,
+        layer: torch.nn.Module,
+        x: torch.Tensor,
+        bias: Optional[torch.Tensor] = None,
+    ) -> torch.Tensor:
+        c = self.config
+
+        x_2d = x.reshape(-1, x.shape[-1])
+        out_shape = x.shape[:-1] + (c.partition_weight_shape[1],)
+
+        w_q, w_s, w_zp, w_g_idx = self._get_weight_params(layer)
+
+        assert w_zp is not None, "Zero points are required by Exllama"
+        assert w_g_idx is not None, "Group index is required by Exllama"
+        output = torch.ops.xspeedgate_ops.gptq_gemm(
+            x_2d, w_q, w_zp, w_s, w_g_idx, True, c.weight_type.size_bits
+        )
+
+        if bias is not None:
+            output.add_(bias)
+        return output.reshape(out_shape)
+
+
+# remove ExllamaLinearKernel and add KunlunExllamaLinearKernel
+_POSSIBLE_KERNELS.remove(ExllamaLinearKernel)
+_POSSIBLE_KERNELS.append(KunlunExllamaLinearKernel)
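The two registry calls above swap the stock Exllama kernel for the Kunlun subclass at import time, so the mixed-precision kernel chooser can only pick the variant whose apply_weights dispatches to the xspeedgate_ops.gptq_gemm custom op. A small verification sketch, assuming this module has already been imported (the assertions are an editorial illustration, not part of the commit):

    from vllm.model_executor.layers.quantization.kernels.mixed_precision import (
        ExllamaLinearKernel,
        _POSSIBLE_KERNELS,
    )

    # The stock kernel is gone and the Kunlun replacement is registered.
    assert ExllamaLinearKernel not in _POSSIBLE_KERNELS
    assert any(k.__name__ == "KunlunExllamaLinearKernel" for k in _POSSIBLE_KERNELS)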
@@ -99,12 +99,5 @@ class KunlunScaledMMLinearKernel(CutlassScaledMMLinearKernel):
 # )


-# monkey patch
-_POSSIBLE_KERNELS[PlatformEnum.CUDA] = [KunlunScaledMMLinearKernel]
-from vllm.model_executor.layers.quantization.kernels.scaled_mm import cutlass
-
-cutlass.CutlassScaledMMLinearKernel = KunlunScaledMMLinearKernel
-print(
-    "[Monkey Patch Applied] >>> vllm.model_executor.layers.quantization.kernels.scaled_mm.cutlass.CutlassScaledMMLinearKernel \
-    --> vllm_kunlun.ops.quantization.kernels.kunlun_scale_mm.KunlunScaledMMLinearKernel"
-)
+# replace CutlassScaledMMLinearKernel with KunlunScaledMMLinearKernel
+_POSSIBLE_KERNELS[PlatformEnum.CUDA] = [KunlunScaledMMLinearKernel]
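After this hunk, the Kunlun scaled-mm kernel is wired in solely through the registry entry; the module-attribute monkey patch on cutlass.CutlassScaledMMLinearKernel and its print notice are gone. An illustrative sketch of the resulting state, assuming _POSSIBLE_KERNELS here is the PlatformEnum-keyed dict that the assignment implies (the import path and assertion are editorial, not from the commit):

    from vllm.platforms.interface import PlatformEnum

    # With the CUDA entry overridden, the scaled-mm kernel chooser has exactly
    # one candidate left, so scaled-mm layers always resolve to the Kunlun kernel.
    assert _POSSIBLE_KERNELS[PlatformEnum.CUDA] == [KunlunScaledMMLinearKernel]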
@@ -1,7 +1,7 @@
 #
-# Copyright (c) 2025 Baidu, Inc. All Rights Reserved.
-# Author: Tang Shiwen
-# Email: tangshiwen@baidu.com
+# Copyright (c) 2026 Baidu, Inc. All Rights Reserved.
+# Author: Tang Shiwen, Li Wei
+# Email: tangshiwen@baidu.com, liwei157@baidu.com
 # This file is a part of the vllm-kunlun project.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -66,3 +66,21 @@ def dequant_int4(
     )

     return fpweight.transpose(1, 2).contiguous()
+
+
+def dequant_int4_native(weight_packed_uint8: torch.Tensor, scale: torch.Tensor):
+    """Unpack uint4 values from a packed uint8 weight tensor and dequantize them to float16."""
+    weight_unpacked_fp16 = (
+        torch.stack(
+            (weight_packed_uint8 & 0xF, (weight_packed_uint8 >> 4) & 0xF),
+            dim=-1,
+        )
+        .reshape(*weight_packed_uint8.shape[:-1], -1)
+        .contiguous()
+        .to(torch.float16)
+        - 8.0
+    )
+    weight_unpacked_fp16 *= scale.repeat(
+        1, 1, weight_unpacked_fp16.shape[-1] // scale.shape[-1]
+    )
+    return weight_unpacked_fp16
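A quick shape sketch of dequant_int4_native for orientation; the tensor sizes below are illustrative assumptions, while the two-nibbles-per-byte packing and the fixed offset of 8 (the symmetric int4 zero point) come from the function above:

    import torch

    # A batch of 8 weight matrices, 128 rows each, 128 int4 values per row
    # packed two per byte into 64 uint8 values.
    packed = torch.randint(0, 256, (8, 128, 64), dtype=torch.uint8)
    # A single scale group per row keeps the repeat along the last dim trivial.
    scale = torch.rand(8, 128, 1, dtype=torch.float16)

    w = dequant_int4_native(packed, scale)
    print(w.shape, w.dtype)  # torch.Size([8, 128, 128]) torch.float16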