[dev] support compressed-tensors w8a8 quantization (#75)

* [dev] support compressed-tensors w8a8 quantization

Co-authored-by: Li Wei <liwei.109@outlook.com>

* [refactor] update KunlunScaledMMLinearKernel impl

* [rebase] resolve conflicts and remove redundant code

---------

Co-authored-by: tangshiwen <tangshiwen@baidu.com>
Author: Li Wei
Date: 2026-01-06 13:51:53 +08:00
Committed by: GitHub
Parent: ee0f50e68f
Commit: 515a4eeda9
8 changed files with 952 additions and 523 deletions
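
To exercise the new path end to end, one could point vLLM at a compressed-tensors w8a8-int8 checkpoint. A minimal sketch (the model name below is hypothetical, and vLLM normally auto-detects the quantization scheme from the checkpoint config):

from vllm import LLM, SamplingParams

# hypothetical w8a8-int8 checkpoint produced with llm-compressor / compressed-tensors
llm = LLM(model="org/Qwen3-MoE-W8A8-Int8",
          quantization="compressed-tensors")  # usually inferred automatically
outputs = llm.generate(["Hello"], SamplingParams(max_tokens=8))
print(outputs[0].outputs[0].text)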

View File

@@ -9,7 +9,7 @@ blake3==1.0.5
cachetools==6.1.0
cbor2==5.7.0
cloudpickle==3.1.1
compressed-tensors==0.11.0
compressed-tensors==0.13.0
diskcache==5.6.3
gguf==0.17.1
mistral_common==1.8.3

View File

@@ -173,10 +173,8 @@ class Qwen3MoeSparseMoeBlock(nn.Module):
# router_logits: (num_tokens, n_experts)
router_logits, _ = self.gate(hidden_states)
kunlun_linear_weights = self.gate.get_weights()
final_hidden_states = self.experts(hidden_states=hidden_states,
router_logits=router_logits,
linear_weights=kunlun_linear_weights)
router_logits=router_logits)
if self.is_sequence_parallel:
final_hidden_states = tensor_model_parallel_all_gather(

View File

@@ -21,7 +21,8 @@ import vllm_kunlun.ops.quantization.awq
import vllm_kunlun.ops.quantization.gptq
import vllm_kunlun.ops.vocab_parallel_embedding
import vllm_kunlun.ops.linear
import vllm_kunlun.ops.quantization.kernels.scaled_mm.cutlass
import vllm_kunlun.ops.vocab_parallel_embedding
import vllm_kunlun.ops.quantization.compressed_tensors_moe
import vllm_kunlun.ops.fused_moe.layer
# import vllm_kunlun.ops.quantization.kernels.scaled_mm.cutlass
import vllm_kunlun.ops.fused_moe.layer
import vllm_kunlun.ops.quantization.compressed_tensors.compressed_tensors
import vllm_kunlun.ops.quantization.compressed_tensors.compressed_tensors_moe
import vllm_kunlun.ops.quantization.kernels.scaled_mm.kunlun

View File

@@ -0,0 +1,75 @@
#
# Copyright (c) 2025 Baidu, Inc. All Rights Reserved.
# Author: Tang Shiwen, Li Wei
# Email: tangshiwen@baidu.com, liwei157@baidu.com
# This file is a part of the vllm-kunlun project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from vllm.model_executor.layers.linear import (
LinearBase,
LinearMethodBase,
UnquantizedLinearMethod,
)
from vllm.model_executor.layers.quantization.compressed_tensors.compressed_tensors import ( # noqa: E501
CompressedTensorsConfig,
CompressedTensorsLinearMethod,
CompressedTensorsMoEMethod,
CompressedTensorsKVCacheMethod,
CompressedTensorsLinearTransformMethod,
get_linear_transform_schemes,
)
from vllm.model_executor.layers.quantization.base_config import QuantizeMethodBase
from vllm_kunlun.ops.fused_moe.layer import FusedMoE
def get_quant_method(
self,
layer: torch.nn.Module,
prefix: str,
) -> Optional["QuantizeMethodBase"]:
from vllm_kunlun.ops.attention.layer import Attention # Avoid circular import
if isinstance(layer, LinearBase):
# collect schemes
quant_scheme = self.get_scheme(layer=layer, layer_name=prefix)
input_tfms, output_tfms = get_linear_transform_schemes(
layer, prefix, self.transform_config, self.packed_modules_mapping
)
# choose quantization method
quant_method: LinearMethodBase = UnquantizedLinearMethod()
if quant_scheme is not None:
layer.scheme = quant_scheme
quant_method = CompressedTensorsLinearMethod(self)
# choose transform method
if any((input_tfms, output_tfms)):
return CompressedTensorsLinearTransformMethod.from_schemes(
quant_method, quant_scheme, input_tfms, output_tfms
)
else:
return quant_method
if isinstance(layer, Attention):
return CompressedTensorsKVCacheMethod(self)
if isinstance(layer, FusedMoE):
return CompressedTensorsMoEMethod.get_moe_method(self, layer)
return None
CompressedTensorsConfig.get_quant_method = get_quant_method
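
The override above takes effect by reassigning the function onto the upstream class object. A minimal sketch of that monkey-patching pattern (names here are illustrative, not vLLM APIs):

class Upstream:
    def get_quant_method(self):
        return "upstream"

def kunlun_get_quant_method(self):
    return "kunlun"

Upstream.get_quant_method = kunlun_get_quant_method   # rebind on the class
assert Upstream().get_quant_method() == "kunlun"      # all instances now dispatch to the patch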

View File

@@ -1,26 +1,35 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
#
# Copyright (c) 2025 Baidu, Inc. All Rights Reserved.
# Author: Li Wei, Tang Shiwen
# Email: liwei157@baidu.com, tangshiwen@baidu.com
# This file is a part of the vllm-kunlun project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from enum import Enum
from typing import Callable, Optional, Union
import torch
from vllm.model_executor.layers.quantization.compressed_tensors.compressed_tensors_moe import CompressedTensorsW8A8Int8MoEMethod
from vllm.model_executor.layers.quantization.compressed_tensors.compressed_tensors_moe import (
CompressedTensorsW8A8Int8MoEMethod,
)
def klx_process_weights_after_loading(layer: torch.nn.Module) -> None:
"""modify scale -> abs max"""
layer.w13_weight = torch.nn.Parameter(layer.w13_weight, requires_grad=False)
layer.w2_weight = torch.nn.Parameter(layer.w2_weight, requires_grad=False)
layer.w13_weight_scale = torch.nn.Parameter(
layer.w13_weight_scale.data * 127, requires_grad=False
)
layer.w2_weight_scale = torch.nn.Parameter(
layer.w2_weight_scale.data * 127, requires_grad=False
)
def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
klx_process_weights_after_loading(layer)
# NOTE: xtorch_ops use max as scale
with torch.no_grad():
layer.w13_weight_scale.mul_(127.0)
layer.w2_weight_scale.mul_(127.0)
def apply(
self,
@@ -49,14 +58,10 @@ def apply(
global_num_experts, up_gate_size, _ = layer.w13_weight.shape
M, N = hidden_states.shape
hidden_dim = layer.w2_weight.shape[1]
normed_score = torch.empty(M,
top_k,
dtype=torch.float32,
device=hidden_states.device)
topk_ids = torch.empty(M,
top_k,
dtype=torch.int32,
device=hidden_states.device)
normed_score = torch.empty(
M, top_k, dtype=torch.float32, device=hidden_states.device
)
topk_ids = torch.empty(M, top_k, dtype=torch.int32, device=hidden_states.device)
num_blocks = 12
block_statistic = torch.zeros(
num_blocks, global_num_experts, dtype=torch.int32, device=hidden_states.device
@@ -69,7 +74,8 @@ def apply(
normed_score=normed_score,
topk_index=topk_ids,
block_statistic=None,
stable=True)
stable=True,
)
elif scoring_func == "sigmoid":
torch.ops._C.moe_sigmoid_group_topk_norm(
x=router_logits,
@@ -82,12 +88,20 @@ def apply(
scale=routed_scaling_factor,
)
moe_expand = torch.empty((M * top_k, N), dtype=hidden_states.dtype, device=hidden_states.device) # [M, top_k, N], float
expert_m = torch.zeros(global_num_experts, dtype=torch.int32, device=hidden_states.device) # [E]
sorted_tokens_num_lod = torch.zeros(global_num_experts + 1, dtype=torch.int32, device=hidden_states.device) # [E+1]
sorted_tokens_idx = torch.zeros(M * top_k, dtype=torch.int32, device=hidden_states.device)
moe_expand = torch.empty(
(M * top_k, N), dtype=hidden_states.dtype, device=hidden_states.device
) # [M, top_k, N], float
expert_m = torch.zeros(
global_num_experts, dtype=torch.int32, device=hidden_states.device
) # [E]
sorted_tokens_num_lod = torch.zeros(
global_num_experts + 1, dtype=torch.int32, device=hidden_states.device
) # [E+1]
sorted_tokens_idx = torch.zeros(
M * top_k, dtype=torch.int32, device=hidden_states.device
)
torch.ops._C.gen_block_statistic(topk_ids,block_statistic)
torch.ops._C.gen_block_statistic(topk_ids, block_statistic)
torch.ops._C.moe_pre_sorted(
x=hidden_states,
@@ -96,18 +110,24 @@ def apply(
moe_expand=moe_expand,
moe_index=sorted_tokens_idx,
expert_m=expert_m,
sorted_tokens_num_lod=sorted_tokens_num_lod)
sorted_tokens_num_lod=sorted_tokens_num_lod,
)
y = torch.empty(M,top_k,
layer.w13_weight.shape[1],
dtype=hidden_states.dtype,
device=hidden_states.device)
y = torch.empty(
M,
top_k,
layer.w13_weight.shape[1],
dtype=hidden_states.dtype,
device=hidden_states.device,
)
moe_expand = moe_expand.view(M * top_k, hidden_dim)
x_shape = moe_expand.shape
x_q = torch.empty(x_shape, dtype=torch.int8, device=moe_expand.device)
x_scale = torch.empty((x_shape[0], 1), dtype=torch.float32, device=moe_expand.device)
x_scale = torch.empty(
(x_shape[0], 1), dtype=torch.float32, device=moe_expand.device
)
torch.ops._C.quant2d(moe_expand, x_q, x_scale, force_sdnn=True)
torch.ops._C.moe_fc(
@@ -121,22 +141,28 @@ def apply(
y=y,
topk_ids=topk_ids,
# sort_mode=False,
act=None)
act=None,
)
d = y.shape[-1] // 2
output_shape = (y.shape[:-1] + (d, ))
output_shape = y.shape[:-1] + (d,)
out1 = torch.empty(output_shape, dtype=y.dtype, device=y.device)
torch.ops._C.silu_and_mul(out1, y)
out = torch.empty(M,top_k,
layer.w2_weight.shape[1],
dtype=hidden_states.dtype,
device=hidden_states.device)
out = torch.empty(
M,
top_k,
layer.w2_weight.shape[1],
dtype=hidden_states.dtype,
device=hidden_states.device,
)
out1 = out1.reshape(-1, out1.shape[-1])
x_shape = out1.shape
x_q = torch.empty(x_shape, dtype=torch.int8, device=moe_expand.device)
x_scale = torch.empty((x_shape[0], 1), dtype=torch.float32, device=moe_expand.device)
x_scale = torch.empty(
(x_shape[0], 1), dtype=torch.float32, device=moe_expand.device
)
torch.ops._C.quant2d(out1, x_q, x_scale, force_sdnn=True)
torch.ops._C.moe_fc(
@@ -150,9 +176,10 @@ def apply(
y=out,
topk_ids=topk_ids,
# sort_mode=False,
act=None)
act=None,
)
dequant_scale = torch.ones([M, top_k], dtype = torch.float32, device=out.device)
dequant_scale = torch.ones([M, top_k], dtype=torch.float32, device=out.device)
output = torch.empty([M, N], dtype=hidden_states.dtype, device=hidden_states.device)
sorted_tokens_idx = sorted_tokens_idx.view(M, top_k)
@@ -161,9 +188,12 @@ def apply(
moe_index=sorted_tokens_idx,
normed_scale=normed_score,
dequant_scale=dequant_scale,
y=output
y=output,
)
return output
CompressedTensorsW8A8Int8MoEMethod.process_weights_after_loading = process_weights_after_loading
CompressedTensorsW8A8Int8MoEMethod.apply = apply
CompressedTensorsW8A8Int8MoEMethod.process_weights_after_loading = (
process_weights_after_loading
)
CompressedTensorsW8A8Int8MoEMethod.apply = apply
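
Two pieces of reference math for the method above, as a sketch: the scale-to-abs-max conversion assumes the checkpoint stores symmetric int8 scales as abs_max / 127 (hence the mul_(127.0) above), and silu_and_mul is assumed to split the last dimension into gate and up halves:

import torch
import torch.nn.functional as F

w = torch.randn(4, 8)
abs_max = w.abs().amax(dim=1, keepdim=True)     # per-channel abs max
scale = abs_max / 127.0                         # scale as stored in the checkpoint
assert torch.allclose(scale * 127.0, abs_max)   # the abs-max form the Kunlun ops consume

def ref_silu_and_mul(y: torch.Tensor) -> torch.Tensor:
    d = y.shape[-1] // 2
    return F.silu(y[..., :d]) * y[..., d:]      # SiLU(gate half) * up half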

View File

@@ -1,122 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Optional
import torch
from vllm.model_executor.layers.quantization.kernels.scaled_mm.ScaledMMLinearKernel import ScaledMMLinearLayerConfig
from vllm.model_executor.layers.quantization.kernels.scaled_mm.cutlass import CutlassScaledMMLinearKernel
from vllm.model_executor.layers.quantization.utils import replace_parameter
from vllm.model_executor.layers.quantization.utils.w8a8_utils import (
convert_to_channelwise)
def can_implement_kunlun(
cls, c: ScaledMMLinearLayerConfig=None) -> tuple[bool, Optional[str]]:
return True, None
def klx_process_weights_after_loading(layer: torch.nn.Module) -> None:
"""modify scale -> abs max"""
layer.weight = torch.nn.Parameter(layer.weight.data, requires_grad=False)
layer.weight_scale = torch.nn.Parameter(
layer.weight_scale.data * 127, requires_grad=False)
def process_weights_after_loading_kunlun(self, layer: torch.nn.Module) -> None:
# WEIGHT
# Cutlass kernels need transposed weight.
weight = getattr(layer, self.w_q_name)
replace_parameter(
layer, self.w_q_name,
torch.nn.Parameter(weight.t().data, requires_grad=False))
# WEIGHT SCALE
# Cutlass kernels support only per-tensor and per-channel.
# If we have a fused module (QKV, MLP) with per tensor scales (thus N
# scales being passed to the kernel), convert to the per-channel case.
is_fused_module = len(layer.logical_widths) > 1
weight_scale = getattr(layer, self.w_s_name)
if is_fused_module and not self.config.is_channelwise:
weight_scale = convert_to_channelwise(weight_scale,
layer.logical_widths)
replace_parameter(
layer, self.w_s_name,
torch.nn.Parameter(weight_scale.data, requires_grad=False))
# INPUT SCALE
if self.config.is_static_input_scheme:
input_scale = getattr(layer, self.i_s_name)
if self.config.input_symmetric:
replace_parameter(
layer, self.i_s_name,
torch.nn.Parameter(input_scale.max(), requires_grad=False))
setattr(layer, self.i_zp_name, None)
else:
input_zero_point = getattr(layer, self.i_zp_name)
# reconstruct the ranges
int8_traits = torch.iinfo(torch.int8)
azps = input_zero_point.to(dtype=torch.int32)
range_max = (input_scale * (int8_traits.max - azps)).max()
range_min = (input_scale * (int8_traits.min - azps)).min()
scale = (range_max - range_min) / (int8_traits.max -
int8_traits.min)
replace_parameter(
layer, self.i_s_name,
torch.nn.Parameter(scale, requires_grad=False))
# AZP loaded as int8 but used as int32
azp = (int8_traits.min -
range_min / scale).to(dtype=torch.int32)
replace_parameter(layer, self.i_zp_name,
torch.nn.Parameter(azp, requires_grad=False))
else:
setattr(layer, self.i_s_name, None)
setattr(layer, self.i_zp_name, None)
# azp_adj is the AZP adjustment term, used to account for weights.
# It does not depend on scales or azp, so it is the same for
# static and dynamic quantization.
# For more details, see csrc/quantization/cutlass_w8a8/Epilogues.md
# https://github.com/vllm-project/vllm/blob/8d59dbb00044a588cab96bcdc028006ed922eb06/csrc/quantization/cutlass_w8a8/Epilogues.md
if not self.config.input_symmetric:
weight = getattr(layer, self.w_q_name)
azp_adj = weight.sum(dim=0, keepdim=True, dtype=torch.int32)
if self.config.is_static_input_scheme:
# cutlass_w8a8 requires azp to be folded into azp_adj
# in the per-tensor case
azp_adj = getattr(layer, self.i_zp_name) * azp_adj
setattr(layer, self.azp_adj_name,
torch.nn.Parameter(azp_adj, requires_grad=False))
else:
setattr(layer, self.azp_adj_name, None)
klx_process_weights_after_loading(layer)
def apply_weights_kunlun(self,
layer: torch.nn.Module,
x: torch.Tensor,
bias: Optional[torch.Tensor] = None) -> torch.Tensor:
x_q, x_scale, out = None, None, None
w_t_shape = layer.weight.T.shape
if isinstance(x, tuple):
x_q, x_scale = x
out = torch.empty((x_q.shape[0], w_t_shape[0]),
dtype=torch.bfloat16,
device=x_q.device)
else:
x_shape = x.shape
x_q = torch.empty(x_shape, dtype=torch.int8, device=x.device)
x_scale = torch.empty((x_shape[0], 1), dtype=torch.float32, device=x.device)
out = torch.empty((x_shape[0], w_t_shape[0]),
dtype=x.dtype,
device=x.device)
torch.ops._C.quant2d(x, x_q, x_scale, force_sdnn=True)
torch.ops._C.gemm_I8_I8_bf16_nt(x_q, x_scale, layer.weight.T.data, layer.weight_scale.data, out)
return out
CutlassScaledMMLinearKernel.apply_weights = apply_weights_kunlun
CutlassScaledMMLinearKernel.can_implement = can_implement_kunlun
CutlassScaledMMLinearKernel.process_weights_after_loading = process_weights_after_loading_kunlun

View File

@@ -0,0 +1,109 @@
#
# Copyright (c) 2025 Baidu, Inc. All Rights Reserved.
# Author: Li Wei, Tang Shiwen
# Email: liwei157@baidu.com, tangshiwen@baidu.com
# This file is a part of the vllm-kunlun project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
import xspeedgate_ops
from vllm.model_executor.layers.quantization.utils import replace_parameter
from vllm.model_executor.layers.quantization.utils.w8a8_utils import (
convert_to_channelwise,
)
from vllm.platforms import current_platform
from vllm.model_executor.layers.quantization.kernels.scaled_mm import ( # noqa: E501
ScaledMMLinearLayerConfig,
CutlassScaledMMLinearKernel,
)
from vllm.platforms import PlatformEnum
from vllm.model_executor.layers.quantization.kernels.scaled_mm import _POSSIBLE_KERNELS
class KunlunScaledMMLinearKernel(CutlassScaledMMLinearKernel):
@classmethod
def can_implement(cls, c: ScaledMMLinearLayerConfig) -> tuple[bool, Optional[str]]:
if not current_platform.is_kunlun():
return False, "KunlunScaledMM requires running on XPU."
return True, None
def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
super().process_weights_after_loading(layer)
# change scale to max for klx ops
with torch.no_grad():
getattr(layer, self.w_s_name).mul_(127.0)
def apply_weights(
self,
layer: torch.nn.Module,
x: torch.Tensor,
bias: Optional[torch.Tensor] = None,
) -> torch.Tensor:
w_q, w_s, x_s, x_zp, azp_adj = self._get_weight_params(layer)
symmetric = azp_adj is None
# scaled_int8_quant supports both dynamic and static quant
# Currently, static is per-tensor and dynamic is per-token
x_q, x_s, x_zp, static = torch.ops._C.scaled_int8_quant(
x=x.contiguous(),
scale=x_s,
azp=x_zp,
symmetric=symmetric,
)
if x_zp is not None: # asymmetric
azp = None if static else x_zp
return torch.ops._C.cutlass_scaled_mm_azp(
a=x_q,
b=w_q,
scale_a=x_s,
scale_b=(w_s / 127.0).transpose(0, 1),
out_dtype=x.dtype,
azp_adj=azp_adj,
azp=azp,
bias=bias.to(torch.float32).contiguous() if bias is not None else None,
)
else: # symmetric
return torch.ops._C.matmul(
x=x_q,
w=w_q.transpose(0, 1),
out_dtype=x.dtype,
x_pc_max=x_s * 127.0 if static else x_s,
w_pc_max=w_s,
bias=bias.to(torch.float32).contiguous() if bias is not None else None,
)
# backup option: lower performance
# return torch.ops._C.cutlass_scaled_mm(
# a = x_q,
# b = w_q,
# scale_a=x_s / 127.0 if not static else x_s,
# scale_b=(w_s / 127.0).transpose(0, 1),
# out_dtype=x.dtype,
# bias=bias.to(torch.float32).contiguous() if bias else None,
# )
_POSSIBLE_KERNELS[PlatformEnum.CUDA] = [KunlunScaledMMLinearKernel]
print(
f"[vllm_kunlun] ScaledMM kernels: {[k.__name__ for k in _POSSIBLE_KERNELS[PlatformEnum.CUDA]]}"
)
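
For intuition, a dequantized reference of the symmetric path above, under the assumption that the *_pc_max arguments are per-row absolute maxima and the int8 inputs were produced with scale = max / 127. This is a sketch of the math, not the Kunlun kernel:

from typing import Optional
import torch

def ref_scaled_mm(x_q: torch.Tensor, x_max: torch.Tensor,
                  w_q: torch.Tensor, w_max: torch.Tensor,
                  bias: Optional[torch.Tensor] = None,
                  out_dtype: torch.dtype = torch.bfloat16) -> torch.Tensor:
    # x_q: [M, K] int8, x_max: [M, 1]; w_q: [N, K] int8, w_max: [N, 1]
    acc = x_q.to(torch.float32) @ w_q.to(torch.float32).t()   # int8 GEMM accumulation, done in fp here
    out = acc * (x_max / 127.0) * (w_max / 127.0).t()         # undo per-token and per-channel scales
    if bias is not None:
        out = out + bias
    return out.to(out_dtype)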

File diff suppressed because it is too large