xc-llm-ascend/vllm_ascend/_310p/quantization/methods/w8a8s.py
Shaoxu Cheng 2064afe380 [300I][Bugfix] fix unquant model weight nd2nz error (#6851)
### What this PR does / why we need it?
- This PR fixes an issue with weight format conversion for unquantized
models running on Ascend 310P devices.

- The changes refactor the logic for converting weights to the
FRACTAL_NZ format. Previously, this was handled in a 310P-specific
linear layer implementation (`AscendUnquantizedLinearMethod310`). This
implementation has been removed, and the logic is now centralized in the
`maybe_trans_nz` utility function, which now checks whether the
device is a 310P and, if so, casts `float16`/`bfloat16` weights to the
NZ format (a minimal sketch follows this list).

- This refactoring simplifies the code by removing platform-specific
duplication and ensures correct weight handling for unquantized models
on 310P.
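
For illustration, a minimal sketch of the centralized check described above. The `is_310p()` helper and the `ACL_FORMAT_FRACTAL_NZ` value are assumptions about `vllm_ascend.utils` internals, not confirmed by this PR, and the real utility may handle additional cases:

```python
import torch
import torch_npu

ACL_FORMAT_FRACTAL_NZ = 29  # assumed ACL enum value for the FRACTAL_NZ layout


def is_310p() -> bool:
    # Assumed helper: detect an Ascend 310P device from the SoC name.
    return "310P" in torch_npu.npu.get_device_name()


def maybe_trans_nz(data: torch.Tensor) -> torch.Tensor:
    # On 310P, fp16/bf16 weights run faster in the FRACTAL_NZ layout;
    # other devices and dtypes are returned unchanged.
    if is_310p() and data.dtype in (torch.float16, torch.bfloat16):
        return torch_npu.npu_format_cast(data, ACL_FORMAT_FRACTAL_NZ)
    return data
```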

### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
Unit tests and local testing.
- vLLM version: v0.15.0
- vLLM main: 83b47f67b1

---------

Signed-off-by: Tflowers-0129 <2906339855@qq.com>
2026-03-03 15:57:26 +08:00


#
# Copyright (c) 2026 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any

import torch
import torch_npu

from vllm_ascend.quantization.methods.base import AscendLinearScheme
from vllm_ascend.utils import maybe_trans_nz

from .registry import register_scheme


@register_scheme("W8A8S", "linear")
class AscendW8A8SLinearMethod310(AscendLinearScheme):
    """310P-only W8A8S sparse linear scheme.

    Notes:
        - This scheme is discovered via the 310P local registry.
    """

    def get_weight(
        self,
        input_size: int,
        output_size: int,
        params_dtype: torch.dtype = torch.float16,
    ) -> dict[str, Any]:
        # The weight itself is stored as int8 for the quantized matmul.
        return {"weight": torch.empty(output_size, input_size, dtype=torch.int8)}

    def get_pertensor_param(self, params_dtype: torch.dtype) -> dict[str, Any]:
        # Per-tensor activation quantization parameters: one scale and one
        # zero-point offset shared across the whole input tensor.
        return {
            "input_scale": torch.empty(1, dtype=params_dtype),
            "input_offset": torch.empty(1, dtype=torch.int8),
        }

    def get_perchannel_param(
        self, output_size: int, params_dtype: torch.dtype
    ) -> dict[str, Any]:
        # Per-output-channel parameters: an int32 bias folded into the
        # quantized matmul and an int64-packed dequantization scale.
        return {
            "quant_bias": torch.empty(output_size, dtype=torch.int32),
            "deq_scale": torch.empty(output_size, dtype=torch.int64),
        }
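
    # Hypothetical illustration (shapes not from the original file): for a
    # linear layer with output_size=4096 and input_size=11008, the scheme
    # would register:
    #   weight:       int8  [4096, 11008]
    #   input_scale:  fp16  [1]       input_offset: int8  [1]     (per-tensor)
    #   quant_bias:   int32 [4096]    deq_scale:    int64 [4096]  (per-channel)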

    def apply(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        bias: torch.Tensor | None = None,
        tp_rank: int | None = 0,
    ) -> torch.Tensor:
        # Quantize the activation on the fly if it has not already been
        # quantized by a preceding op.
        if x.dtype != torch.int8:
            x = torch.ops.vllm.quantize(
                x,
                layer.aclnn_input_scale,
                layer.aclnn_input_scale_reciprocal,
                layer.aclnn_input_offset,
            )
        # Under tensor parallelism the bias must be added exactly once,
        # so only rank 0 applies it.
        quant_bias = layer.quant_bias if tp_rank == 0 else None
        # int8 x @ int8 W^T, dequantized per output channel by deq_scale
        # back to the layer's floating-point dtype.
        return torch_npu.npu_quant_matmul(
            x,
            layer.weight.data.transpose(0, 1),
            layer.deq_scale,
            bias=quant_bias,
            output_dtype=layer.params_dtype,
        )

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        # Broadcast the per-tensor scale/offset to the input width expected
        # by the aclnn quantize op, and precompute the scale reciprocal.
        expanding_factor = layer.weight.data.shape[1]
        layer.aclnn_input_scale = layer.input_scale.data.repeat(expanding_factor)
        layer.aclnn_input_scale_reciprocal = 1.0 / layer.aclnn_input_scale.data
        layer.aclnn_input_offset = layer.input_offset.data.repeat(
            expanding_factor
        ).to(layer.aclnn_input_scale.dtype)
        # Convert the weight to FRACTAL_NZ on 310P (see maybe_trans_nz).
        layer.weight.data = maybe_trans_nz(layer.weight.data)
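
As a side note on discovery: the `register_scheme` decorator comes from the local `.registry` module, whose contents are not shown here. A hypothetical minimal registry consistent with the decorator's usage might look like the sketch below; the names `_SCHEMES` and `get_scheme` are illustrative assumptions, not the actual API.

```python
# Hypothetical sketch of the 310P local registry; the real .registry
# module's API is not shown in this file.
_SCHEMES: dict[tuple[str, str], type] = {}


def register_scheme(quant_method: str, layer_type: str):
    # Returns a class decorator that files the scheme under
    # (quant_method, layer_type), e.g. ("W8A8S", "linear").
    def decorator(cls: type) -> type:
        _SCHEMES[(quant_method, layer_type)] = cls
        return cls

    return decorator


def get_scheme(quant_method: str, layer_type: str) -> type:
    # Lookup used at layer-construction time to pick a scheme class.
    return _SCHEMES[(quant_method, layer_type)]
```

With such a registry, `get_scheme("W8A8S", "linear")` would resolve to `AscendW8A8SLinearMethod310` on 310P builds.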