[refactor] update Kunlun classes with monkey patch (#122)

Signed-off-by: Li Wei <liwei.109@outlook.com>
Li Wei
2026-01-19 20:24:19 +08:00
committed by GitHub
parent 2512259944
commit 8f56cbf3ed
8 changed files with 444 additions and 378 deletions

View File

@@ -21,7 +21,7 @@ from vllm.distributed import (
tensor_model_parallel_all_gather,
)
from vllm.logger import init_logger
from vllm_kunlun.ops.fused_moe.layer import FusedMoE
from vllm.model_executor.layers.fused_moe.layer import FusedMoE
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (
MergedColumnParallelLinear,

View File

@@ -38,7 +38,7 @@ from vllm.distributed import (get_ep_group, get_pp_group,
tensor_model_parallel_all_gather)
from vllm.logger import init_logger
from vllm_kunlun.ops.activation import SiluAndMul
from vllm_kunlun.ops.fused_moe.layer import FusedMoE
from vllm.model_executor.layers.fused_moe.layer import FusedMoE
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (MergedColumnParallelLinear,
QKVParallelLinear,

View File

@@ -27,7 +27,7 @@ from vllm.logger import init_logger
from vllm_kunlun.ops.fla import (fused_recurrent_gated_delta_rule, torch_chunk_gated_delta_rule, chunk_gated_delta_rule)
from vllm.model_executor.layers.fla.ops import (
RMSNormGated)
from vllm_kunlun.ops.fused_moe.layer import FusedMoE
from vllm.model_executor.layers.fused_moe.layer import FusedMoE
# yapf conflicts with isort for this block
# yapf: disable
from vllm.model_executor.layers.layernorm import (
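Note on the three model-file hunks above: they switch the FusedMoE import from vllm_kunlun.ops.fused_moe.layer back to the upstream vllm.model_executor.layers.fused_moe.layer path, and the Kunlun subclass is installed on that upstream module by the monkey patch in layer.py further down. Because `from m import X` copies the binding at import time, the swap only takes effect for modules imported after the patch runs. A minimal sketch with stand-in names (not vllm's real import graph) of that ordering behaviour:

import types

layer = types.ModuleType("layer")           # stands in for ...fused_moe.layer

class FusedMoE:                             # upstream class
    pass

class KunlunFusedMoE(FusedMoE):             # Kunlun subclass
    pass

layer.FusedMoE = FusedMoE

bound_early = layer.FusedMoE                # a model file imported too early
layer.FusedMoE = KunlunFusedMoE             # the monkey patch runs
bound_late = layer.FusedMoE                 # a model file imported afterwards

print(bound_early is KunlunFusedMoE)        # False: kept the upstream class
print(bound_late is KunlunFusedMoE)         # True: sees the Kunlun class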

View File

@@ -1,17 +1,35 @@
"""layer.py"""
#
# Copyright (c) 2026 Baidu, Inc. All Rights Reserved.
#
# This file is a part of the vllm-kunlun project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import nullcontext
from typing import Callable, Optional, Union, get_args
from typing import Callable, Optional
import torch
from vllm.model_executor.layers.quantization.compressed_tensors.utils import (
should_ignore_layer,
)
from vllm.model_executor.layers.quantization.base_config import QuantizationConfig
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.fused_moe.layer import UnquantizedFusedMoEMethod
from vllm.model_executor.layers.fused_moe.layer import (
UnquantizedFusedMoEMethod,
FusedMoE,
)
def apply(
class KunlunUnquantizedFusedMoEMethod(UnquantizedFusedMoEMethod):
def apply(
self,
layer: torch.nn.Module,
x: torch.Tensor,
@@ -37,12 +55,15 @@ def apply(
"""apply"""
if enable_eplb:
raise NotImplementedError(
"EPLB not supported for `UnquantizedFusedMoEMethod` yet.")
"EPLB not supported for `UnquantizedFusedMoEMethod` yet."
)
"""forward_kunlun"""
from vllm_kunlun.ops._kunlun_ops import KunlunOps as ops
if self.moe.use_ep:
return ops.fused_moe_ep(x,
return ops.fused_moe_ep(
x,
layer.w13_weight,
layer.w2_weight,
router_logits,
@@ -52,9 +73,11 @@ def apply(
inplace=True,
use_grouped_topk=use_grouped_topk,
num_expert_group=num_expert_group,
topk_group=topk_group)
topk_group=topk_group,
)
else:
return ops.fused_moe(x,
return ops.fused_moe(
x,
layer.w13_weight,
layer.w2_weight,
router_logits,
@@ -67,13 +90,12 @@ def apply(
topk_group=topk_group,
scoring_func=scoring_func,
e_score_correction_bias=e_score_correction_bias,
w1_bias=getattr(layer, 'w13_bias', None),
w2_bias=getattr(layer, 'w2_bias', None),
w1_bias=getattr(layer, "w13_bias", None),
w2_bias=getattr(layer, "w2_bias", None),
)
UnquantizedFusedMoEMethod.apply = apply
class VllmFusedMoE(FusedMoE):
class KunlunFusedMoE(FusedMoE):
def __init__(
self,
num_experts: int, # Global number of experts
@@ -131,7 +153,8 @@ class VllmFusedMoE(FusedMoE):
has_bias=has_bias,
is_sequence_parallel=is_sequence_parallel,
zero_expert_num=zero_expert_num,
zero_expert_type=zero_expert_type)
zero_expert_type=zero_expert_type,
)
self.has_bias = has_bias
self.register_parameter("w13_bias", None)
self.register_parameter("w2_bias", None)
@@ -143,7 +166,7 @@ class VllmFusedMoE(FusedMoE):
fused_mapping=self.quant_config.packed_modules_mapping,
)
):
self.quant_method = UnquantizedFusedMoEMethod(self.moe_config)
self.quant_method = KunlunUnquantizedFusedMoEMethod(self.moe_config)
moe_quant_params = {
"num_experts": self.local_num_experts,
"hidden_size": hidden_size,
@@ -154,4 +177,17 @@ class VllmFusedMoE(FusedMoE):
self.quant_method.create_weights(layer=self, **moe_quant_params)
FusedMoE = VllmFusedMoE
# monkey patch
from vllm.model_executor.layers.fused_moe import layer
layer.UnquantizedFusedMoEMethod = KunlunUnquantizedFusedMoEMethod
layer.FusedMoE = KunlunFusedMoE
print(
"[Monkey Patch Applied] >>> from vllm.model_executor.layers.fused_moe.layer.UnquantizedFusedMoEMethod \
--> vllm_kunlun.ops.fused_moe.layer.KunlunUnquantizedFusedMoEMethod"
)
print(
"[Monkey Patch Applied] >>> from vllm.model_executor.layers.fused_moe.layer.FusedMoE \
--> vllm_kunlun.ops.fused_moe.layer.KunlunFusedMoE"
)
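The layer.py change above replaces the old pattern of assigning free functions and aliases onto the upstream symbols (`UnquantizedFusedMoEMethod.apply = apply`, `FusedMoE = VllmFusedMoE`) with proper subclasses that are then rebound on the vllm module. A minimal sketch of the difference, using toy classes rather than vllm's:

class UpstreamMethod:                        # stand-in for UnquantizedFusedMoEMethod
    def apply(self):
        return "upstream"

# Old style (removed in this commit): mutate the upstream class in place,
#   UpstreamMethod.apply = kunlun_apply
# which changes every existing reference and leaves no Kunlun type to test for.

# New style: subclass, then rebind the module attribute, as layer.py now does
# with layer.UnquantizedFusedMoEMethod and layer.FusedMoE.
class KunlunMethod(UpstreamMethod):
    def apply(self):
        return "kunlun"

m = KunlunMethod()
assert m.apply() == "kunlun"
assert isinstance(m, UpstreamMethod)         # upstream isinstance checks still pass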

View File

@@ -17,12 +17,13 @@
# limitations under the License.
import torch
from vllm.logger import init_logger
from typing import Optional
from vllm.model_executor.layers.quantization.awq import AWQLinearMethod
logger = init_logger(__name__)
def repack_int4_for_kunlun(self, packed: torch.Tensor, num_bits: int = 4):
class KunlunAWQLinearMethod(AWQLinearMethod):
def repack_int4_for_kunlun(self, packed: torch.Tensor, num_bits: int = 4):
"""Convert AWQ-packed int4 weights to Kunlun XPU format.
Input: packed[N, K], dtype=int32, saved as AWQ order
Output: packed_reordered[N, K], dtype=int32, saved as Kunlun order
@@ -76,7 +77,8 @@ def repack_int4_for_kunlun(self, packed: torch.Tensor, num_bits: int = 4):
return packed_kunlun
def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
logger.warning_once(f"Repacking INT4 for XPU ...")
layer.qweight = torch.nn.Parameter(
(
self.repack_int4_for_kunlun(layer.qweight.data)
@@ -96,9 +98,9 @@ def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
layer.scales = torch.nn.Parameter(layer.scales.data, requires_grad=False)
def apply(
def apply(
self, layer: torch.nn.Module, x: torch.Tensor, bias: Optional[torch.Tensor] = None
) -> torch.Tensor:
) -> torch.Tensor:
qweight = layer.qweight
scales = layer.scales
qzeros = layer.qzeros
@@ -123,6 +125,11 @@ def apply(
return out.reshape(out_shape)
AWQLinearMethod.repack_int4_for_kunlun = repack_int4_for_kunlun
AWQLinearMethod.process_weights_after_loading = process_weights_after_loading
AWQLinearMethod.apply = apply
# monkey patch
from vllm.model_executor.layers.quantization import awq
awq.AWQLinearMethod = KunlunAWQLinearMethod
print(
"[Monkey Patch Applied] >>> vllm.model_executor.layers.quantization.awq.AWQLinearMethod \
--> vllm_kunlun.ops.quantization.awq.KunlunAWQLinearMethod"
)
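Rebinding `awq.AWQLinearMethod` at the bottom of this file only takes effect where the class is resolved through the module at call time, which is presumably how `AWQConfig.get_quant_method` constructs it. A self-contained sketch of that behaviour (toy classes, not vllm's real code):

class AWQLinearMethod:                       # toy stand-in
    pass

def get_quant_method():
    # the class name is looked up in this module's globals when called,
    # not frozen at definition time
    return AWQLinearMethod()

class KunlunAWQLinearMethod(AWQLinearMethod):
    pass

globals()["AWQLinearMethod"] = KunlunAWQLinearMethod   # the rebind / monkey patch
assert type(get_quant_method()) is KunlunAWQLinearMethod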

View File

@@ -24,14 +24,15 @@ from vllm.model_executor.layers.quantization.compressed_tensors.compressed_tenso
)
def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
class KunlunCompressedTensorsW8A8Int8MoEMethod(CompressedTensorsW8A8Int8MoEMethod):
def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
# NOTE: xtorch_ops use max as scale
with torch.no_grad():
layer.w13_weight_scale.mul_(127.0)
layer.w2_weight_scale.mul_(127.0)
def apply(
def apply(
self,
layer: torch.nn.Module,
x: torch.Tensor,
@@ -53,7 +54,7 @@ def apply(
expert_load_view: Optional[torch.Tensor] = None,
logical_to_physical_map: Optional[torch.Tensor] = None,
logical_replica_count: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
) -> Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
hidden_states = x
global_num_experts, up_gate_size, _ = layer.w13_weight.shape
M, N = hidden_states.shape
@@ -64,7 +65,10 @@ def apply(
topk_ids = torch.empty(M, top_k, dtype=torch.int32, device=hidden_states.device)
num_blocks = 12
block_statistic = torch.zeros(
num_blocks, global_num_experts, dtype=torch.int32, device=hidden_states.device
num_blocks,
global_num_experts,
dtype=torch.int32,
device=hidden_states.device,
)
router_logits = router_logits.float()
@@ -180,7 +184,9 @@ def apply(
)
dequant_scale = torch.ones([M, top_k], dtype=torch.float32, device=out.device)
output = torch.empty([M, N], dtype=hidden_states.dtype, device=hidden_states.device)
output = torch.empty(
[M, N], dtype=hidden_states.dtype, device=hidden_states.device
)
sorted_tokens_idx = sorted_tokens_idx.view(M, top_k)
torch.ops._C.moe_post(
@@ -193,7 +199,15 @@ def apply(
return output
CompressedTensorsW8A8Int8MoEMethod.process_weights_after_loading = (
process_weights_after_loading
# monkey patch
from vllm.model_executor.layers.quantization.compressed_tensors import (
compressed_tensors_moe,
)
compressed_tensors_moe.CompressedTensorsW8A8Int8MoEMethod = (
KunlunCompressedTensorsW8A8Int8MoEMethod
)
print(
"[Monkey Patch Applied] >>> vllm.model_executor.layers.quantization.compressed_tensors.compressed_tensors_moe.CompressedTensorsW8A8Int8MoEMethod \
--> vllm_kunlun.ops.quantization.compressed_tensors_moe.py:KunlunCompressedTensorsW8A8Int8MoEMethod"
)
CompressedTensorsW8A8Int8MoEMethod.apply = apply
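One hedged reading of the `mul_(127.0)` in `process_weights_after_loading`: if the checkpoint stores per-channel scales as absmax / 127 while the Kunlun int8 kernel (per the "xtorch_ops use max as scale" note) expects the raw absmax, multiplying by 127 converts between the two without changing the dequantized values. A small numeric check under that assumption:

import torch

w = torch.randn(4, 16)
absmax = w.abs().amax(dim=1, keepdim=True)
scale = absmax / 127.0                       # scale as stored in the checkpoint
q = torch.clamp(torch.round(w / scale), -127, 127).to(torch.int8)

kernel_scale = scale * 127.0                 # what w13/w2_weight_scale.mul_(127.0) produces
assert torch.allclose(kernel_scale, absmax)

# dequantization gives the same numbers either way: x ~= q * (kernel_scale / 127)
deq = q.float() * (kernel_scale / 127.0)
assert torch.allclose(deq, q.float() * scale)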

View File

@@ -17,14 +17,16 @@
# limitations under the License.
import torch
from torch.nn.parameter import Parameter
from typing import Optional
from torch.nn.parameter import Parameter
from vllm.logger import init_logger
from vllm.model_executor.layers.quantization.gptq import GPTQLinearMethod, ExllamaState
logger = init_logger(__name__)
def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
class KunlunGPTQLinearMethod(GPTQLinearMethod):
def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
# for torch.compile
logger.warning_once(f"Repacking INT4 for XPU ...")
layer.qzeros = Parameter(
self.repack_int4_for_kunlun(layer.qzeros.data, self.quant_config.weight_bits)
if self.quant_config.weight_bits == 4 else layer.qzeros.data,
@@ -50,7 +52,7 @@ def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
# self.quant_config.weight_bits)
def repack_int4_for_kunlun(self, packed: torch.Tensor, num_bits: int = 4):
def repack_int4_for_kunlun(self, packed: torch.Tensor, num_bits: int = 4):
N, K = packed.shape
assert num_bits == 4, "Only int4 supported now"
shifts = torch.arange(0, 32, num_bits, device=packed.device, dtype=torch.int32)
@@ -83,9 +85,9 @@ def repack_int4_for_kunlun(self, packed: torch.Tensor, num_bits: int = 4):
return packed_kunlun
def apply(
def apply(
self, layer: torch.nn.Module, x: torch.Tensor, bias: Optional[torch.Tensor] = None
) -> torch.Tensor:
) -> torch.Tensor:
out_shape = x.shape[:-1] + (layer.qweight.shape[-1], )
reshaped_x = x.reshape(-1, x.shape[-1])
@@ -103,6 +105,11 @@ def apply(
return output.reshape(out_shape)
GPTQLinearMethod.repack_int4_for_kunlun = repack_int4_for_kunlun
GPTQLinearMethod.process_weights_after_loading = process_weights_after_loading
GPTQLinearMethod.apply = apply
# monkey patch
from vllm.model_executor.layers.quantization import gptq
gptq.GPTQLinearMethod = KunlunGPTQLinearMethod
print(
"[Monkey Patch Applied] >>> vllm.model_executor.layers.quantization.gptq.GPTQLinearMethod \
--> vllm_kunlun.ops.quantization.gptq.KunlunGPTQLinearMethod"
)
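For context on `repack_int4_for_kunlun` above, the `shifts = torch.arange(0, 32, num_bits, ...)` line is the standard way of splitting each int32 into eight 4-bit fields; a tiny sketch of just that unpack step (the Kunlun-specific reordering is not reproduced here):

import torch

packed = torch.tensor([[0x76543210]], dtype=torch.int32)   # one int32 = eight int4 values
shifts = torch.arange(0, 32, 4, dtype=torch.int32)         # 0, 4, ..., 28
nibbles = (packed.unsqueeze(-1) >> shifts) & 0xF            # shape [N, K, 8]
print(nibbles.flatten().tolist())                           # [0, 1, 2, 3, 4, 5, 6, 7]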

View File

@@ -21,7 +21,6 @@ from typing import Optional
import torch
import xspeedgate_ops
from vllm.platforms import current_platform, PlatformEnum
from vllm.model_executor.layers.quantization.utils import replace_parameter
from vllm.model_executor.layers.quantization.utils.w8a8_utils import (
convert_to_channelwise,
)
@@ -100,9 +99,12 @@ class KunlunScaledMMLinearKernel(CutlassScaledMMLinearKernel):
# )
# monkey patch
_POSSIBLE_KERNELS[PlatformEnum.CUDA] = [KunlunScaledMMLinearKernel]
from vllm.model_executor.layers.quantization.kernels.scaled_mm import cutlass
cutlass.CutlassScaledMMLinearKernel = KunlunScaledMMLinearKernel
print(
f"[vllm_kunlun] ScaledMM kernels: {[k.__name__ for k in _POSSIBLE_KERNELS[PlatformEnum.CUDA]]}"
"[Monkey Patch Applied] >>> vllm.model_executor.layers.quantization.kernels.scaled_mm.cutlass.CutlassScaledMMLinearKernel \
--> vllm_kunlun.ops.quantization.kernels.kunlun_scale_mm.KunlunScaledMMLinearKernel"
)
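The `_POSSIBLE_KERNELS[PlatformEnum.CUDA] = [KunlunScaledMMLinearKernel]` line swaps out the whole candidate list for the platform, so any chooser that walks that dict can only pick the Kunlun kernel. A self-contained toy of that mechanism (the names mirror the diff, but this is not vllm's real selection code):

from enum import Enum

class PlatformEnum(Enum):                    # toy stand-in for vllm.platforms.PlatformEnum
    CUDA = "cuda"

class CutlassScaledMMLinearKernel:
    pass

class KunlunScaledMMLinearKernel(CutlassScaledMMLinearKernel):
    pass

_POSSIBLE_KERNELS = {PlatformEnum.CUDA: [CutlassScaledMMLinearKernel]}

def choose_kernel(platform):
    # pick the first registered kernel for the platform
    return _POSSIBLE_KERNELS[platform][0]

_POSSIBLE_KERNELS[PlatformEnum.CUDA] = [KunlunScaledMMLinearKernel]   # the patch
assert choose_kernel(PlatformEnum.CUDA) is KunlunScaledMMLinearKernel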