[Lint]Style: Convert vllm-ascend/ to ruff format (Batch #7) (#6023)

### What this PR does / why we need it?
**Scope of Changes**:
| File Path |
| :--- |
| `vllm_ascend/quantization/compressed_tensors/compressed_tensors.py` |
| `vllm_ascend/quantization/quant_config.py` |
| `vllm_ascend/quantization/utils.py` |
| `vllm_ascend/quantization/w4a16.py` |
| `vllm_ascend/quantization/w4a4_flatquant_dynamic.py` |
| `vllm_ascend/quantization/w4a8_dynamic.py` |
| `vllm_ascend/quantization/w8a16.py` |
| `vllm_ascend/quantization/w8a8.py` |
| `vllm_ascend/quantization/w8a8_dynamic.py` |
| `vllm_ascend/quantization/w8a8_pdmix.py` |
| `vllm_ascend/quantization/w8a8mxfp8.py` |
| `vllm_ascend/sample/rejection_sampler.py` |
| `vllm_ascend/sample/sampler.py` |
| `vllm_ascend/worker/block_table.py` |

### Does this PR introduce _any_ user-facing change?

No. This is a formatting-only change; behavior is unchanged.
### How was this patch tested?

- vLLM version: v0.13.0
- vLLM main: 2c24bc6996

Signed-off-by: MrZ20 <2609716663@qq.com>
Commit 99aedaff63 (parent d0bc16859c), authored by SILONG ZENG on 2026-02-06 14:56:53 +08:00 and committed by GitHub.
20 changed files with 997 additions and 1307 deletions.


```diff
@@ -15,7 +15,8 @@
 # limitations under the License.
 #
-from typing import Any, Callable, Dict, Optional
+from collections.abc import Callable
+from typing import Any

 import torch
 import torch_npu
```
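For context, this import change is the typing modernization ruff's target Python version allows: `Dict`/`Optional` from `typing` give way to builtin generics (PEP 585) and `X | None` unions (PEP 604), with `Callable` sourced from `collections.abc`. A minimal sketch of the before/after annotation style; `read_group_size` is a hypothetical helper, not code from this file:

```python
from collections.abc import Callable
from typing import Any


# Hypothetical helper, used only to illustrate the annotation style.
def read_group_size(
    description: dict[str, Any],                    # was: Dict[str, Any]
    transform: Callable[[int], int] | None = None,  # was: Optional[Callable[[int], int]]
) -> int | None:                                    # was: Optional[int]
    value = description.get("group_size")
    if value is None or transform is None:
        return value
    return transform(value)


print(read_group_size({"group_size": 32}))                   # 32
print(read_group_size({"group_size": 32}, lambda v: v * 2))  # 64
```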
```diff
@@ -56,8 +57,7 @@ def unpack_from_int32(
             dtype=torch.int32,
         )
         for i in range(pack_factor):
-            unpacked_weight[:, i::pack_factor] = (weight >>
-                                                  (num_bits * i)) & mask
+            unpacked_weight[:, i::pack_factor] = (weight >> (num_bits * i)) & mask
         original_row_size = int(shape[1])
         unpacked_weight = unpacked_weight[:, :original_row_size]
     else:
@@ -67,8 +67,7 @@ def unpack_from_int32(
             dtype=torch.int32,
         )
         for i in range(pack_factor):
-            unpacked_weight[i::pack_factor, :] = (weight >>
-                                                  (num_bits * i)) & mask
+            unpacked_weight[i::pack_factor, :] = (weight >> (num_bits * i)) & mask
         original_row_size = int(shape[0])
         unpacked_weight = unpacked_weight[:original_row_size, :]
```
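The reflowed lines are the core of the int4 unpacking: each 32-bit word holds `pack_factor` 4-bit lanes, recovered by shifting and masking. A self-contained toy round trip of that shift-and-mask logic (illustrative only, not code from this file):

```python
import torch

# One int32 word holds pack_factor = 32 // num_bits int4 lanes.
num_bits = 4
pack_factor = 32 // num_bits  # 8
mask = (1 << num_bits) - 1    # 0b1111

values = torch.arange(8, dtype=torch.int32)  # 0..7, each fits in 4 bits
packed = torch.zeros((), dtype=torch.int32)
for i in range(pack_factor):
    packed |= values[i] << (num_bits * i)    # pack lane i into bits [4i, 4i+4)

unpacked = torch.empty(pack_factor, dtype=torch.int32)
for i in range(pack_factor):
    unpacked[i] = (packed >> (num_bits * i)) & mask  # same expression as the diff

assert torch.equal(unpacked, values)
```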
```diff
@@ -84,22 +83,17 @@ def pack_to_int32(weight: torch.Tensor) -> torch.Tensor:
     :param weight: The 3D tensor to pack, must be int8 or int32 dtype
     :return: Packed tensor with int32 dtype optimized for storage
     """
-    assert weight.dim(
-    ) == 3, f"Expecting `weight.dim()` is 3 ([e, n, k] or [e, k, n]) but got {weight.dim()}."
-    assert weight.dtype in [
-        torch.int8, torch.int32
-    ], f"Expecting `weight.dtype` is torch.int8 or torch.int32 but got {weight.dtype}."
+    assert weight.dim() == 3, f"Expecting `weight.dim()` is 3 ([e, n, k] or [e, k, n]) but got {weight.dim()}."
+    assert weight.dtype in [torch.int8, torch.int32], (
+        f"Expecting `weight.dtype` is torch.int8 or torch.int32 but got {weight.dtype}."
+    )
     if weight.dtype == torch.int32:
-        assert weight.shape[
-            -1] % 8 == 0, "the last dim of weight needs to be divided by 8."
-        packed_weight = torch_npu.npu_convert_weight_to_int4pack(
-            weight.flatten(0, 1))
-        packed_weight = packed_weight.view(weight.shape[0], weight.shape[1],
-                                           -1)
+        assert weight.shape[-1] % 8 == 0, "the last dim of weight needs to be divided by 8."
+        packed_weight = torch_npu.npu_convert_weight_to_int4pack(weight.flatten(0, 1))
+        packed_weight = packed_weight.view(weight.shape[0], weight.shape[1], -1)
     else:
-        assert weight.shape[
-            -1] % 4 == 0, "the last dim of weight needs to be divided by 4."
+        assert weight.shape[-1] % 4 == 0, "the last dim of weight needs to be divided by 4."
         packed_weight = weight.view(torch.int32).contiguous()
     return packed_weight
```
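A quick sketch of the int8 branch only: four int8 lanes are reinterpreted as one int32, which is why the last dim must be divisible by 4. The int32 branch calls `torch_npu.npu_convert_weight_to_int4pack` and needs Ascend hardware, so it is not reproduced here; the values below are arbitrary.

```python
import torch

# Reinterpret four int8 lanes as one int32 (no data copied by view).
w = torch.randint(-128, 128, (2, 3, 8), dtype=torch.int8)  # [e, n, k] with k % 4 == 0
packed = w.view(torch.int32).contiguous()
print(tuple(w.shape), "->", tuple(packed.shape))  # (2, 3, 8) -> (2, 3, 2)
```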
```diff
@@ -115,8 +109,7 @@ class AscendW4A16FusedMoEMethod(AscendMoEScheme):
         self.pack_factor = 8  # pack 8 of torch.int4 tensors to torch.int32
         vllm_config = get_current_vllm_config()
-        self.group_size = vllm_config.quant_config.quant_description.get(
-            "group_size", 32)
+        self.group_size = vllm_config.quant_config.quant_description.get("group_size", 32)
         self.dynamic_eplb = get_ascend_config().eplb_config.dynamic_eplb

     def get_weight(
@@ -125,22 +118,23 @@ class AscendW4A16FusedMoEMethod(AscendMoEScheme):
         intermediate_size_per_partition: int,
         hidden_sizes: int,
         params_dtype: torch.dtype,
-    ) -> Dict[str, Any]:
-        assert intermediate_size_per_partition % self.pack_factor == 0, f"Expecting `intermediate_size_per_partition` {intermediate_size_per_partition} can be divided by `pack_factor` {self.pack_factor}"
-        assert hidden_sizes % self.pack_factor == 0, f"Expecting `hidden_sizes` {hidden_sizes} can be divided by `pack_factor` {self.pack_factor}"
+    ) -> dict[str, Any]:
+        assert intermediate_size_per_partition % self.pack_factor == 0, (
+            f"Expecting `intermediate_size_per_partition` {intermediate_size_per_partition} "
+            f"can be divided by `pack_factor` {self.pack_factor}"
+        )
+        assert hidden_sizes % self.pack_factor == 0, (
+            f"Expecting `hidden_sizes` {hidden_sizes} can be divided by `pack_factor` {self.pack_factor}"
+        )
         param_dict = {}
         param_dict["w13_weight_packed"] = torch.empty(
-            num_experts,
-            2 * intermediate_size_per_partition,
-            hidden_sizes // self.pack_factor,
-            dtype=torch.int32)
+            num_experts, 2 * intermediate_size_per_partition, hidden_sizes // self.pack_factor, dtype=torch.int32
+        )
         param_dict["w2_weight_packed"] = torch.empty(
-            num_experts,
-            hidden_sizes,
-            intermediate_size_per_partition // self.pack_factor,
-            dtype=torch.int32)
+            num_experts, hidden_sizes, intermediate_size_per_partition // self.pack_factor, dtype=torch.int32
+        )
         return param_dict
```
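The two allocations above shrink the packed (last) dimension by `pack_factor`. With hypothetical layer sizes (not taken from any real model config):

```python
# Hypothetical sizes, only to show the shapes get_weight allocates.
num_experts, intermediate, hidden, pack_factor = 4, 1408, 2048, 8
w13_weight_packed = (num_experts, 2 * intermediate, hidden // pack_factor)
w2_weight_packed = (num_experts, hidden, intermediate // pack_factor)
print(w13_weight_packed)  # (4, 2816, 256)
print(w2_weight_packed)   # (4, 2048, 176)
```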
```diff
@@ -150,38 +144,31 @@ class AscendW4A16FusedMoEMethod(AscendMoEScheme):
         intermediate_size_per_partition: int,
         hidden_sizes: int,
         params_dtype: torch.dtype,
-    ) -> Dict[str, Any]:
-        assert intermediate_size_per_partition % self.group_size == 0, f"Expecting `intermediate_size_per_partition` {intermediate_size_per_partition} can be divided by `group_size` {self.group_size}"
-        assert hidden_sizes % self.group_size == 0, f"Expecting `hidden_sizes` {hidden_sizes} can be divided by `group_size` {self.group_size}"
+    ) -> dict[str, Any]:
+        assert intermediate_size_per_partition % self.group_size == 0, (
+            f"Expecting `intermediate_size_per_partition` {intermediate_size_per_partition} "
+            f"can be divided by `group_size` {self.group_size}"
+        )
+        assert hidden_sizes % self.group_size == 0, (
+            f"Expecting `hidden_sizes` {hidden_sizes} can be divided by `group_size` {self.group_size}"
+        )
         param_dict = {}
         param_dict["w13_weight_scale"] = torch.empty(
-            num_experts,
-            2 * intermediate_size_per_partition,
-            hidden_sizes // self.group_size,
-            dtype=torch.bfloat16)
+            num_experts, 2 * intermediate_size_per_partition, hidden_sizes // self.group_size, dtype=torch.bfloat16
+        )
         param_dict["w2_weight_scale"] = torch.empty(
-            num_experts,
-            hidden_sizes,
-            intermediate_size_per_partition // self.group_size,
-            dtype=torch.bfloat16)
-        param_dict["w13_weight_shape"] = torch.empty(num_experts,
-                                                     2,
-                                                     dtype=torch.int32)
-        param_dict["w2_weight_shape"] = torch.empty(num_experts,
-                                                    2,
-                                                    dtype=torch.int32)
+            num_experts, hidden_sizes, intermediate_size_per_partition // self.group_size, dtype=torch.bfloat16
+        )
+        param_dict["w13_weight_shape"] = torch.empty(num_experts, 2, dtype=torch.int32)
+        param_dict["w2_weight_shape"] = torch.empty(num_experts, 2, dtype=torch.int32)
         param_dict["w13_weight_offset"] = torch.zeros(
-            num_experts,
-            2 * intermediate_size_per_partition,
-            hidden_sizes // self.group_size,
-            dtype=torch.bfloat16)
+            num_experts, 2 * intermediate_size_per_partition, hidden_sizes // self.group_size, dtype=torch.bfloat16
+        )
         param_dict["w2_weight_offset"] = torch.zeros(
-            num_experts,
-            hidden_sizes,
-            intermediate_size_per_partition // self.group_size,
-            dtype=torch.bfloat16)
+            num_experts, hidden_sizes, intermediate_size_per_partition // self.group_size, dtype=torch.bfloat16
+        )
         return param_dict
@@ -194,21 +181,22 @@ class AscendW4A16FusedMoEMethod(AscendMoEScheme):
         renormalize: bool,
         use_grouped_topk: bool = False,
         global_num_experts: int = -1,
-        expert_map: Optional[torch.Tensor] = None,
-        topk_group: Optional[int] = None,
-        num_expert_group: Optional[int] = None,
-        custom_routing_function: Optional[Callable] = None,
+        expert_map: torch.Tensor | None = None,
+        topk_group: int | None = None,
+        num_expert_group: int | None = None,
+        custom_routing_function: Callable | None = None,
         scoring_func: str = "softmax",
         routed_scaling_factor: float = 1.0,
-        e_score_correction_bias: Optional[torch.Tensor] = None,
+        e_score_correction_bias: torch.Tensor | None = None,
         is_prefill: bool = True,
         enable_force_load_balance: bool = True,
-        log2phy: Optional[torch.Tensor] = None,
+        log2phy: torch.Tensor | None = None,
         global_redundant_expert_num: int = 0,
         **kwargs,
     ) -> torch.Tensor:
-        assert router_logits.shape[
-            1] == global_num_experts - global_redundant_expert_num, "Number of global experts mismatch (excluding redundancy)"
+        assert router_logits.shape[1] == global_num_experts - global_redundant_expert_num, (
+            "Number of global experts mismatch (excluding redundancy)"
+        )

         topk_weights, topk_ids = select_experts(
             hidden_states=x,
@@ -221,7 +209,8 @@ class AscendW4A16FusedMoEMethod(AscendMoEScheme):
             custom_routing_function=custom_routing_function,
             scoring_func=scoring_func,
             e_score_correction_bias=e_score_correction_bias,
-            global_num_experts=global_num_experts)
+            global_num_experts=global_num_experts,
+        )

         topk_ids = topk_ids.to(torch.int32)
         topk_weights = topk_weights.to(x.dtype)
```
```diff
@@ -241,38 +230,40 @@ class AscendW4A16FusedMoEMethod(AscendMoEScheme):
             expert_map=expert_map,
             log2phy=log2phy,
             dynamic_eplb=self.dynamic_eplb,
-            mc2_mask=kwargs.get("mc2_mask", None))
+            mc2_mask=kwargs.get("mc2_mask"),
+        )

     def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
         if self.transpose_weight:
             w13_shape = layer.w13_weight_packed.data.shape
             w2_shape = layer.w2_weight_packed.data.shape
-            unpacked_w13_weight = (unpack_from_int32(
-                layer.w13_weight_packed.data.flatten(0, 1),
-                torch.Size([
-                    w13_shape[0] * w13_shape[1],
-                    w13_shape[2] * self.pack_factor
-                ]),
-                self.num_bits,
-            ).view(w13_shape[0], w13_shape[1],
-                   -1).transpose(1, 2).contiguous().int())
-            unpacked_w2_weight = (unpack_from_int32(
-                layer.w2_weight_packed.data.flatten(0, 1),
-                torch.Size([
-                    w2_shape[0] * w2_shape[1], w2_shape[2] * self.pack_factor
-                ]),
-                self.num_bits,
-            ).view(w2_shape[0], w2_shape[1],
-                   -1).transpose(1, 2).contiguous().int())
+            unpacked_w13_weight = (
+                unpack_from_int32(
+                    layer.w13_weight_packed.data.flatten(0, 1),
+                    torch.Size([w13_shape[0] * w13_shape[1], w13_shape[2] * self.pack_factor]),
+                    self.num_bits,
+                )
+                .view(w13_shape[0], w13_shape[1], -1)
+                .transpose(1, 2)
+                .contiguous()
+                .int()
+            )
+            unpacked_w2_weight = (
+                unpack_from_int32(
+                    layer.w2_weight_packed.data.flatten(0, 1),
+                    torch.Size([w2_shape[0] * w2_shape[1], w2_shape[2] * self.pack_factor]),
+                    self.num_bits,
+                )
+                .view(w2_shape[0], w2_shape[1], -1)
+                .transpose(1, 2)
+                .contiguous()
+                .int()
+            )
             layer.w13_weight_packed.data = pack_to_int32(unpacked_w13_weight)
             layer.w2_weight_packed.data = pack_to_int32(unpacked_w2_weight)
-            layer.w13_weight_scale.data = layer.w13_weight_scale.data.transpose(
-                1, 2).contiguous()
-            layer.w2_weight_scale.data = layer.w2_weight_scale.data.transpose(
-                1, 2).contiguous()
-            layer.w13_weight_offset.data = layer.w13_weight_offset.data.transpose(
-                1, 2).contiguous()
-            layer.w2_weight_offset.data = layer.w2_weight_offset.data.transpose(
-                1, 2).contiguous()
+            layer.w13_weight_scale.data = layer.w13_weight_scale.data.transpose(1, 2).contiguous()
+            layer.w2_weight_scale.data = layer.w2_weight_scale.data.transpose(1, 2).contiguous()
+            layer.w13_weight_offset.data = layer.w13_weight_offset.data.transpose(1, 2).contiguous()
+            layer.w2_weight_offset.data = layer.w2_weight_offset.data.transpose(1, 2).contiguous()
```
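One detail worth noting in the method-chain reformat above: `.transpose(1, 2)` only swaps strides, so the following `.contiguous()` is what actually reorders memory before repacking. A quick CPU illustration with toy shapes (not the layer's real dimensions):

```python
import torch

# transpose returns a strided view; contiguous() physically reorders the data,
# which dtype reinterpretation and packing kernels expect.
t = torch.arange(24, dtype=torch.int32).view(2, 3, 4)
tt = t.transpose(1, 2)                  # shape (2, 4, 3), a strided view
print(tt.is_contiguous())               # False
print(tt.contiguous().is_contiguous())  # True
```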