[Lint] Style: Convert vllm-ascend/ to ruff format (Batch #7) (#6023)

### What this PR does / why we need it?
This PR reformats the files listed below with ruff as part of the batch migration of `vllm-ascend/` to ruff formatting; a brief illustration of the converted annotation style follows the file list.

**Scope of Changes**:
| File Path |
| :--- |
| `vllm_ascend/quantization/compressed_tensors/compressed_tensors.py` |
| `vllm_ascend/quantization/quant_config.py` |
| `vllm_ascend/quantization/utils.py` |
| `vllm_ascend/quantization/w4a16.py` |
| `vllm_ascend/quantization/w4a4_flatquant_dynamic.py` |
| `vllm_ascend/quantization/w4a8_dynamic.py` |
| `vllm_ascend/quantization/w8a16.py` |
| `vllm_ascend/quantization/w8a8.py` |
| `vllm_ascend/quantization/w8a8_dynamic.py` |
| `vllm_ascend/quantization/w8a8_pdmix.py` |
| `vllm_ascend/quantization/w8a8mxfp8.py` |
| `vllm_ascend/sample/rejection_sampler.py` |
| `vllm_ascend/sample/sampler.py` |
| `vllm_ascend/worker/block_table.py` |
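
The conversion is mostly mechanical: yapf-style wrapped calls are collapsed or re-wrapped using ruff's trailing-comma convention, and `typing.Dict`/`typing.Optional` annotations are replaced with built-in generics and PEP 604 unions (the style ruff's pyupgrade-derived rules suggest). A minimal before/after sketch of the annotation change, using signatures that mirror the converted ones in the diff below (the bodies are illustrative only, not the real implementations):

```python
from typing import Any


# Before (typing-module generics):
#   from typing import Any, Dict, Optional
#   def get_weight(...) -> Dict[str, Any]: ...
#   def set_rotation_config(...) -> Optional[str]: ...
#
# After (built-in generics and PEP 604 unions):
def get_weight(input_size: int, output_size: int) -> dict[str, Any]:
    """Return a parameter dict keyed by name (illustrative body only)."""
    return {"shape": (output_size, input_size)}


def set_rotation_config(prefix: str, metadata: dict) -> str | None:
    """Return a rotation type for the given prefix, or None (illustrative body only)."""
    return metadata.get(prefix)
```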

### Does this PR introduce _any_ user-facing change?

No. This batch only changes code formatting and type-annotation style; runtime behavior is unchanged.

### How was this patch tested?

- vLLM version: v0.13.0
- vLLM main: 2c24bc6996

Signed-off-by: MrZ20 <2609716663@qq.com>
Commit 99aedaff63 (parent d0bc16859c), authored by SILONG ZENG and committed via GitHub on 2026-02-06 14:56:53 +08:00.
20 changed files with 997 additions and 1307 deletions.


@@ -15,7 +15,7 @@
 # limitations under the License.
 #
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 import torch
 import torch_npu
@@ -27,7 +27,7 @@ from .registry import register_scheme
 @register_scheme("W4A4_DYNAMIC", "linear")
 class AscendW4A4LaosDynamicLinearMethod(AscendLinearScheme):
     """Linear method for Ascend W4A4_DYNAMIC.
 
     This class implements W4A4 quantization with LAOS approach and dynamic activation quantization.
     - Weight: 4-bit quantization (per-channel) with scale and offset, stored as int8.
     - Activation: 4-bit dynamic quantization.
@@ -37,7 +37,7 @@ class AscendW4A4LaosDynamicLinearMethod(AscendLinearScheme):
         self.transpose_weight = True
         self.rotation_type = None
 
-    def set_rotation_config(self, prefix: str, metadata: Dict) -> Optional[str]:
+    def set_rotation_config(self, prefix: str, metadata: dict) -> str | None:
         """Set rotation config based on prefix and metadata."""
         layer_idx = prefix.split(".")[2]
         if prefix.endswith("o_proj"):
@@ -50,34 +50,22 @@ class AscendW4A4LaosDynamicLinearMethod(AscendLinearScheme):
             return "kronecker_rotation"
         return None
 
-    def get_weight(self, input_size: int, output_size: int,
-                   params_dtype: torch.dtype) -> Dict[str, Any]:
-        params_dict = {
-            "weight": torch.empty(output_size, input_size, dtype=torch.int8)
-        }
+    def get_weight(self, input_size: int, output_size: int, params_dtype: torch.dtype) -> dict[str, Any]:
+        params_dict = {"weight": torch.empty(output_size, input_size, dtype=torch.int8)}
         return params_dict
 
-    def get_perchannel_param(self, output_size: int,
-                             params_dtype: torch.dtype) -> Dict[str, Any]:
+    def get_perchannel_param(self, output_size: int, params_dtype: torch.dtype) -> dict[str, Any]:
         params_dict = {}
-        params_dict["weight_scale"] = torch.empty(output_size,
-                                                  1,
-                                                  dtype=torch.float32)
-        params_dict["weight_offset"] = torch.empty(output_size,
-                                                   1,
-                                                   dtype=torch.float32)
+        params_dict["weight_scale"] = torch.empty(output_size, 1, dtype=torch.float32)
+        params_dict["weight_offset"] = torch.empty(output_size, 1, dtype=torch.float32)
         if self.rotation_type == "heads_rotation":
-            params_dict["heads_rotation"] = torch.zeros((64, 64),
-                                                        dtype=torch.float32)
+            params_dict["heads_rotation"] = torch.zeros((64, 64), dtype=torch.float32)
         if self.rotation_type == "kronecker_rotation":
-            params_dict["kronecker_rotation_n"] = torch.zeros(
-                (160, 160), dtype=torch.float32)
-            params_dict["kronecker_rotation_m"] = torch.zeros(
-                (160, 160), dtype=torch.float32)
+            params_dict["kronecker_rotation_n"] = torch.zeros((160, 160), dtype=torch.float32)
+            params_dict["kronecker_rotation_m"] = torch.zeros((160, 160), dtype=torch.float32)
         return params_dict
 
-    def apply_rotation(self, layer: torch.nn.Module,
-                       x: torch.Tensor) -> torch.Tensor:
+    def apply_rotation(self, layer: torch.nn.Module, x: torch.Tensor) -> torch.Tensor:
         """Apply rotation transformation to input tensor."""
         init_shape = x.shape
         dtype = x.dtype
@@ -100,8 +88,8 @@ class AscendW4A4LaosDynamicLinearMethod(AscendLinearScheme):
         self,
         layer: torch.nn.Module,
         x: torch.Tensor,
-        bias: Optional[torch.Tensor] = None,
-        tp_rank: Optional[int] = 0,
+        bias: torch.Tensor | None = None,
+        tp_rank: int | None = 0,
     ) -> torch.Tensor:
         dtype = x.dtype
         x, pertoken_scale = torch_npu.npu_dynamic_quant(x, dst_type=torch.quint4x2)
@@ -113,14 +101,14 @@ class AscendW4A4LaosDynamicLinearMethod(AscendLinearScheme):
             scale=layer.weight_scale.data.view(-1),
             pertoken_scale=pertoken_scale,
             bias=None,
-            output_dtype=dtype)
+            output_dtype=dtype,
+        )
         if bias is not None:
             output = output + bias.to(dtype)
         return output
 
     def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
         layer.weight_scale.data = layer.weight_scale.data.to(torch.float32)
-        layer.weight.data = torch_npu.npu_convert_weight_to_int4pack(
-            layer.weight.data.to(torch.int32))
+        layer.weight.data = torch_npu.npu_convert_weight_to_int4pack(layer.weight.data.to(torch.int32))
         if self.transpose_weight:
             layer.weight.data = layer.weight.data.transpose(-1, -2)
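
For context on the scheme this file implements (W4A4 with a per-channel weight scale and offset stored alongside int4 weights held in int8/int32 before packing), the sketch below shows what per-channel asymmetric 4-bit weight quantization looks like in plain PyTorch. It is a minimal illustration under assumed conventions: the name `fake_quant_w4_per_channel` and the exact zero-point convention are mine, and the production path uses the `torch_npu` kernels (`npu_dynamic_quant`, `npu_convert_weight_to_int4pack`) rather than this code.

```python
import torch


def fake_quant_w4_per_channel(weight: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Illustrative per-output-channel asymmetric 4-bit quantization (hypothetical).

    The [output_size, 1] scale/offset shapes mirror the get_perchannel_param
    buffers above; the zero-point convention here is one common choice, not
    necessarily the one used by the Ascend kernels.
    """
    qmin, qmax = -8, 7  # signed int4 range
    w_min = weight.min(dim=1, keepdim=True).values
    w_max = weight.max(dim=1, keepdim=True).values
    scale = (w_max - w_min).clamp(min=1e-8) / (qmax - qmin)
    offset = qmin - w_min / scale  # float zero-point, analogous to weight_offset
    q = torch.clamp(torch.round(weight / scale + offset), qmin, qmax).to(torch.int8)
    return q, scale.to(torch.float32), offset.to(torch.float32)


# Hypothetical usage: quantize a [output_size, input_size] weight matrix.
w = torch.randn(128, 256)
w_q, w_scale, w_offset = fake_quant_w4_per_channel(w)
w_dequant = (w_q.float() - w_offset) * w_scale  # approximate reconstruction
```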
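The `kronecker_rotation_n`/`kronecker_rotation_m` parameters above suggest a Kronecker-factored rotation in the FlatQuant style. As an assumption about how such a rotation is typically applied (the body of `apply_rotation` is not part of these hunks), the sketch below applies (R_n ⊗ R_m) to the last dimension via a reshape and two small matmuls instead of materializing the full Kronecker product:

```python
import torch


def apply_kronecker_rotation(x: torch.Tensor, r_n: torch.Tensor, r_m: torch.Tensor) -> torch.Tensor:
    """Apply the rotation (r_n ⊗ r_m) along the last dim of x (illustrative only)."""
    n, m = r_n.shape[0], r_m.shape[0]
    orig_shape = x.shape
    x = x.reshape(*orig_shape[:-1], n, m)  # split the hidden dim into an n x m grid
    x = torch.einsum("...nm,in,jm->...ij", x, r_n, r_m)  # r_n @ X @ r_m.T per token
    return x.reshape(orig_shape)


# Sanity check against the explicit Kronecker product on a small case.
n, m = 4, 3
r_n, r_m = torch.randn(n, n), torch.randn(m, m)
x = torch.randn(2, n * m)
expected = x @ torch.kron(r_n, r_m).T
assert torch.allclose(apply_kronecker_rotation(x, r_n, r_m), expected, atol=1e-4)
```

For the (160, 160) factors registered above, this factored form replaces one 25600 × 25600 rotation matmul with two 160 × 160 matmuls per token.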