- NZ Format Support for Linear Layers: Added support for the NZ (N-dimensional Z-order) format for linear-layer weights on Ascend 310P, improving performance for both quantized and unquantized layers.
- Unquantized Linear Method for Ascend 310P: Introduced AscendUnquantizedLinearMethod310, which applies NZ format casting to unquantized linear-layer weights after they are loaded.
- MRotaryEmbedding Integration: Extended rotary-embedding support with AscendMRotaryEmbedding310, an Ascend-specific implementation of MRotaryEmbedding.
- Quantization Method Updates: Updated the w8a8_static quantization method to transpose weights and apply NZ format casting directly, keeping it consistent with the new format (see the sketch below this list).
- vLLM version: v0.15.0
- vLLM main: 9562912cea
---------
Signed-off-by: Tflowers-0129 <2906339855@qq.com>
#
# Copyright (c) 2026 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import torch
import torch.nn as nn
import torch_npu

from vllm.model_executor.layers.linear import (
    LinearBase,
    QuantizeMethodBase,
    UnquantizedLinearMethod,
)
from vllm.model_executor.layers.quantization.base_config import QuantizationConfig

from vllm_ascend.utils import ACL_FORMAT_FRACTAL_NZ


class AscendUnquantizedLinearMethod310(UnquantizedLinearMethod):
    """Unquantized linear method for Ascend 310P that casts weights to NZ format."""

    def process_weights_after_loading(self, layer: nn.Module) -> None:
        super().process_weights_after_loading(layer)
        # Conv1d-style weights keep their default format; all other linear
        # weights are cast to the fractal NZ format after loading.
        if "conv1d" not in getattr(layer, "prefix", ""):
            layer.weight.data = torch_npu.npu_format_cast(
                layer.weight.data, ACL_FORMAT_FRACTAL_NZ)


class AscendLinearBase310(LinearBase):
    """LinearBase variant for Ascend 310P that selects the 310P quant methods."""

    def __init__(
        self,
        input_size: int,
        output_size: int,
        skip_bias_add: bool = False,
        params_dtype: object | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        *,
        return_bias: bool = True,
        disable_tp: bool = False,
    ):
        nn.Module.__init__(self)

        self.input_size = int(input_size)
        self.output_size = int(output_size)
        self.skip_bias_add = skip_bias_add
        # 310P forces float16 parameters; the params_dtype argument is not used.
        self.params_dtype = torch.float16
        self.quant_config = quant_config
        self.prefix = prefix
        self.return_bias = return_bias
        self.disable_tp = disable_tp

        if quant_config is None:
            self.quant_method: QuantizeMethodBase | None = AscendUnquantizedLinearMethod310()
        else:
            self.quant_method = quant_config.get_quant_method(self, prefix=prefix)