### What this PR does / why we need it?
Formatting and typing cleanup, with no intended change in behavior: type annotations are modernized (`Dict[...]` becomes `dict[...]`, `Optional[X]` becomes `X | None`, and `Callable` is imported from `collections.abc`), and long call sites are reflowed, as shown in the `w8a8_dynamic.py` diff below.

**Scope of Changes**:
| File Path |
| :--- |
| `vllm_ascend/quantization/compressed_tensors/compressed_tensors.py` |
| `vllm_ascend/quantization/quant_config.py` |
| `vllm_ascend/quantization/utils.py` |
| `vllm_ascend/quantization/w4a16.py` |
| `vllm_ascend/quantization/w4a4_flatquant_dynamic.py` |
| `vllm_ascend/quantization/w4a8_dynamic.py` |
| `vllm_ascend/quantization/w8a16.py` |
| `vllm_ascend/quantization/w8a8.py` |
| `vllm_ascend/quantization/w8a8_dynamic.py` |
| `vllm_ascend/quantization/w8a8_pdmix.py` |
| `vllm_ascend/quantization/w8a8mxfp8.py` |
| `vllm_ascend/sample/rejection_sampler.py` |
| `vllm_ascend/sample/sampler.py` |
| `vllm_ascend/worker/block_table.py` |
### Does this PR introduce _any_ user-facing change?
No. The changes are formatting and type-annotation updates only.
### How was this patch tested?
- vLLM version: v0.13.0
- vLLM main: 2c24bc6996
Signed-off-by: MrZ20 <2609716663@qq.com>
The `vllm_ascend/quantization/w8a8_dynamic.py` portion of the diff:
```diff
@@ -15,7 +15,8 @@
 # limitations under the License.
 #
 
-from typing import Any, Callable, Dict, Optional
+from collections.abc import Callable
+from typing import Any
 
 import torch
 import torch_npu
```
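The import swap above sets the typing convention used throughout the diff: builtin generics and PEP 604 unions replace `typing.Dict`/`Optional`, and `Callable` comes from `collections.abc`. A minimal sketch of the equivalence (function names here are illustrative, not from the PR):

```python
from collections.abc import Callable
from typing import Any


# Old spelling:  def get(...) -> Dict[str, Any];  bias: Optional[Tensor] = None
# New spelling, identical to type checkers on Python 3.10+:
def get_weight_shapes(n: int) -> dict[str, Any]:
    # PEP 585: builtin generics (dict, list, tuple) are valid annotations.
    return {"count": n}


def apply_fn(fn: Callable[[int], int] | None = None, x: int | None = 0) -> int:
    # PEP 604: X | None replaces Optional[X].
    return fn(x) if fn is not None and x is not None else 0
```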
```diff
@@ -28,8 +29,7 @@ from vllm_ascend.ascend_config import get_ascend_config
 from vllm_ascend.ascend_forward_context import MoECommType
 from vllm_ascend.distributed.parallel_state import get_mc2_group
 from vllm_ascend.flash_common3_context import get_flash_common3_context
-from vllm_ascend.ops.fused_moe.experts_selector import (select_experts,
-                                                        zero_experts_compute)
+from vllm_ascend.ops.fused_moe.experts_selector import select_experts, zero_experts_compute
 from vllm_ascend.utils import ACL_FORMAT_FRACTAL_NZ, maybe_trans_nz
 
 from .base import AscendLinearScheme, AscendMoEScheme, QuantType
```
```diff
@@ -39,16 +39,17 @@ from .registry import register_scheme
 def scale_from_float_to_int64(scale):
     """Convert float32 scale to int64 representation."""
     import numpy as np
 
     scale = torch.from_numpy(
-        np.frombuffer(scale.cpu().to(torch.float32).numpy().tobytes(),
-                      dtype=np.int32).astype(np.int64)).to(scale.device)
+        np.frombuffer(scale.cpu().to(torch.float32).numpy().tobytes(), dtype=np.int32).astype(np.int64)
+    ).to(scale.device)
     return scale
 
 
 @register_scheme("W8A8_DYNAMIC", "linear")
 class AscendW8A8DynamicLinearMethod(AscendLinearScheme):
     """Linear method for Ascend W8A8_DYNAMIC.
 
     This scheme uses dynamic per-token quantization for activations
     and per-channel quantization for weights.
     """
```
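`scale_from_float_to_int64` does not convert the scale's numeric value; it reinterprets the raw float32 bit pattern as int32 and widens it to int64. A torch-only sketch of the same reinterpretation (using `Tensor.view(dtype)` is our substitution for the numpy round-trip, not what the file does):

```python
import torch


def scale_bits_to_int64(scale: torch.Tensor) -> torch.Tensor:
    # view(torch.int32) reinterprets the float32 bits (same element size),
    # then .to(torch.int64) sign-extends each value.
    return scale.to(torch.float32).view(torch.int32).to(torch.int64)


s = torch.tensor([1.0, 0.5], dtype=torch.float32)
print(scale_bits_to_int64(s))  # tensor([1065353216, 1056964608]) -- the IEEE-754 bit patterns
```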
```diff
@@ -56,33 +57,26 @@ class AscendW8A8DynamicLinearMethod(AscendLinearScheme):
     def __init__(self):
         pass
 
-    def get_weight(self, input_size: int, output_size: int,
-                   params_dtype: torch.dtype) -> Dict[str, Any]:
-        params_dict = {
-            "weight": torch.empty(output_size, input_size, dtype=torch.int8)
-        }
+    def get_weight(self, input_size: int, output_size: int, params_dtype: torch.dtype) -> dict[str, Any]:
+        params_dict = {"weight": torch.empty(output_size, input_size, dtype=torch.int8)}
         return params_dict
 
     def get_perchannel_param(
         self,
         output_size: int,
         params_dtype: torch.dtype,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         params_dict = {}
-        params_dict["weight_scale"] = torch.empty(output_size,
-                                                  1,
-                                                  dtype=params_dtype)
-        params_dict["weight_offset"] = torch.empty(output_size,
-                                                   1,
-                                                   dtype=params_dtype)
+        params_dict["weight_scale"] = torch.empty(output_size, 1, dtype=params_dtype)
+        params_dict["weight_offset"] = torch.empty(output_size, 1, dtype=params_dtype)
         return params_dict
 
     def apply(
         self,
         layer: torch.nn.Module,
         x: torch.Tensor,
-        bias: Optional[torch.Tensor] = None,
-        tp_rank: Optional[int] = 0,
+        bias: torch.Tensor | None = None,
+        tp_rank: int | None = 0,
     ) -> torch.Tensor:
         quantized_x, pertoken_scale = torch_npu.npu_dynamic_quant(x)
         output = torch_npu.npu_quant_matmul(
```
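`apply` quantizes activations on the fly with `torch_npu.npu_dynamic_quant` (one scale per token) and multiplies against int8 weights that carry one scale per output channel. A CPU sketch of per-token dynamic quantization, assuming the usual symmetric absmax scheme (the NPU op's exact rounding is not specified here):

```python
import torch


def dynamic_per_token_quant(x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
    # One scale per row (token): absmax / 127 for symmetric int8.
    scale = x.abs().amax(dim=-1, keepdim=True).clamp(min=1e-12) / 127.0
    q = torch.round(x / scale).clamp(-128, 127).to(torch.int8)
    return q, scale.squeeze(-1)


x = torch.randn(4, 16)
q, per_token_scale = dynamic_per_token_quant(x)
# Each token has its own scale, so dequantization error stays small per row.
print((q.float() * per_token_scale.unsqueeze(-1) - x).abs().max())
```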
```diff
@@ -116,9 +110,10 @@ class AscendW8A8DynamicFusedMoEMethod(AscendMoEScheme):
 
         vllm_config = get_current_vllm_config()
         ascend_config = get_ascend_config()
-        self.use_aclgraph = (vllm_config.compilation_config.mode
-                             == CompilationMode.VLLM_COMPILE
-                             and not vllm_config.model_config.enforce_eager)
+        self.use_aclgraph = (
+            vllm_config.compilation_config.mode == CompilationMode.VLLM_COMPILE
+            and not vllm_config.model_config.enforce_eager
+        )
         self.multistream_overlap_gate = ascend_config.multistream_overlap_gate
 
         self.dynamic_eplb = ascend_config.eplb_config.dynamic_eplb
```
```diff
@@ -130,49 +125,34 @@ class AscendW8A8DynamicFusedMoEMethod(AscendMoEScheme):
             # TODO: Try local_rank = ep_group.rank_in_group
             local_rank = torch.distributed.get_rank(group=device_group)
             backend = device_group._get_backend(torch.device("npu"))
-            self.moe_all_to_all_group_name = backend.get_hccl_comm_name(
-                local_rank)
+            self.moe_all_to_all_group_name = backend.get_hccl_comm_name(local_rank)
         except AttributeError:
             self.moe_all_to_all_group_name = ""
 
-    def get_weight(self, num_experts: int,
-                   intermediate_size_per_partition: int, hidden_sizes: int,
-                   params_dtype: torch.dtype) -> Dict[str, Any]:
+    def get_weight(
+        self, num_experts: int, intermediate_size_per_partition: int, hidden_sizes: int, params_dtype: torch.dtype
+    ) -> dict[str, Any]:
         param_dict = {}
-        param_dict["w13_weight"] = torch.empty(num_experts,
-                                               2 *
-                                               intermediate_size_per_partition,
-                                               hidden_sizes,
-                                               dtype=torch.int8)
-        param_dict["w2_weight"] = torch.empty(num_experts,
-                                              hidden_sizes,
-                                              intermediate_size_per_partition,
-                                              dtype=torch.int8)
+        param_dict["w13_weight"] = torch.empty(
+            num_experts, 2 * intermediate_size_per_partition, hidden_sizes, dtype=torch.int8
+        )
+        param_dict["w2_weight"] = torch.empty(
+            num_experts, hidden_sizes, intermediate_size_per_partition, dtype=torch.int8
+        )
         return param_dict
 
-    def get_dynamic_quant_param(self, num_experts: int,
-                                intermediate_size_per_partition: int,
-                                hidden_sizes: int,
-                                params_dtype: torch.dtype) -> Dict[str, Any]:
+    def get_dynamic_quant_param(
+        self, num_experts: int, intermediate_size_per_partition: int, hidden_sizes: int, params_dtype: torch.dtype
+    ) -> dict[str, Any]:
         param_dict = {}
         param_dict["w13_weight_scale"] = torch.empty(
-            num_experts,
-            2 * intermediate_size_per_partition,
-            1,
-            dtype=params_dtype)
+            num_experts, 2 * intermediate_size_per_partition, 1, dtype=params_dtype
+        )
         param_dict["w13_weight_offset"] = torch.empty(
-            num_experts,
-            2 * intermediate_size_per_partition,
-            1,
-            dtype=params_dtype)
-        param_dict["w2_weight_scale"] = torch.empty(num_experts,
-                                                    hidden_sizes,
-                                                    1,
-                                                    dtype=params_dtype)
-        param_dict["w2_weight_offset"] = torch.empty(num_experts,
-                                                     hidden_sizes,
-                                                     1,
-                                                     dtype=params_dtype)
+            num_experts, 2 * intermediate_size_per_partition, 1, dtype=params_dtype
+        )
+        param_dict["w2_weight_scale"] = torch.empty(num_experts, hidden_sizes, 1, dtype=params_dtype)
+        param_dict["w2_weight_offset"] = torch.empty(num_experts, hidden_sizes, 1, dtype=params_dtype)
         return param_dict
 
     def apply(
```
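`get_weight` allocates `w13` as `(num_experts, 2 * intermediate, hidden)` and `w2` as the down projection; the factor 2 is the fused gate/up (w1/w3) layout that feeds the SwiGLU named later in `process_weights_after_loading`. A shape walk-through with illustrative sizes (none of these numbers come from the PR, and float weights stand in for int8 so the CPU matmul runs):

```python
import torch
import torch.nn.functional as F

E, H, I = 8, 64, 128  # experts, hidden size, intermediate size (illustrative)
w13 = torch.randn(E, 2 * I, H)  # fused gate+up projections, one per expert
w2 = torch.randn(E, H, I)       # down projection, one per expert

x = torch.randn(3, H)                          # 3 tokens routed to expert 0
gate, up = (x @ w13[0].t()).chunk(2, dim=-1)   # (3, I) each
y = (F.silu(gate) * up) @ w2[0].t()            # SwiGLU, then down-project: (3, H)
print(y.shape)
```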
```diff
@@ -184,25 +164,26 @@ class AscendW8A8DynamicFusedMoEMethod(AscendMoEScheme):
         renormalize: bool,
         use_grouped_topk: bool = False,
         global_num_experts: int = -1,
-        expert_map: Optional[torch.Tensor] = None,
-        topk_group: Optional[int] = None,
-        num_expert_group: Optional[int] = None,
-        custom_routing_function: Optional[Callable] = None,
+        expert_map: torch.Tensor | None = None,
+        topk_group: int | None = None,
+        num_expert_group: int | None = None,
+        custom_routing_function: Callable | None = None,
         scoring_func: str = "softmax",
         routed_scaling_factor: float = 1.0,
-        e_score_correction_bias: Optional[torch.Tensor] = None,
+        e_score_correction_bias: torch.Tensor | None = None,
         is_prefill: bool = True,
         enable_force_load_balance: bool = False,
-        log2phy: Optional[torch.Tensor] = None,
+        log2phy: torch.Tensor | None = None,
         global_redundant_expert_num: int = 0,
-        pertoken_scale: Optional[Any] = None,
+        pertoken_scale: Any | None = None,
         **kwargs,
     ) -> torch.Tensor:
         zero_expert_num = getattr(layer, "zero_expert_num", 0)
         zero_expert_type = getattr(layer, "zero_expert_type", None)
         if zero_expert_num == 0 or zero_expert_type is None:
-            assert router_logits.shape[1] == global_num_experts - global_redundant_expert_num, \
-                "Number of global experts mismatch (excluding redundancy)"
+            assert router_logits.shape[1] == global_num_experts - global_redundant_expert_num, (
+                "Number of global experts mismatch (excluding redundancy)"
+            )
 
         if self.multistream_overlap_gate:
             fc3_context = get_flash_common3_context()
@@ -222,7 +203,8 @@ class AscendW8A8DynamicFusedMoEMethod(AscendMoEScheme):
                 scoring_func=scoring_func,
                 routed_scaling_factor=routed_scaling_factor,
                 e_score_correction_bias=e_score_correction_bias,
-                global_num_experts=global_num_experts)
+                global_num_experts=global_num_experts,
+            )
         assert topk_ids is not None
         assert topk_weights is not None
         if zero_expert_num > 0 and zero_expert_type is not None:
```
```diff
@@ -237,12 +219,10 @@ class AscendW8A8DynamicFusedMoEMethod(AscendMoEScheme):
         # to avoid accumulating too much tokens on a single rank.
         # currently it is only activated when doing profile runs.
         if enable_force_load_balance:
-            random_matrix = torch.rand(topk_ids.size(0),
-                                       global_num_experts -
-                                       global_redundant_expert_num,
-                                       device=topk_ids.device)
-            topk_ids = torch.argsort(
-                random_matrix, dim=1)[:, :topk_ids.size(1)].to(topk_ids.dtype)
+            random_matrix = torch.rand(
+                topk_ids.size(0), global_num_experts - global_redundant_expert_num, device=topk_ids.device
+            )
+            topk_ids = torch.argsort(random_matrix, dim=1)[:, : topk_ids.size(1)].to(topk_ids.dtype)
 
         assert topk_weights is not None
         topk_weights = topk_weights.to(self.in_dtype)
```
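The forced load-balance branch discards the routed expert ids during profile runs: row-wise `argsort` over i.i.d. uniform noise yields a uniformly random permutation per row, so its first `k` columns give each token `k` distinct random experts. A standalone sketch:

```python
import torch

num_tokens, num_experts, k = 5, 16, 2
random_matrix = torch.rand(num_tokens, num_experts)
# argsort of i.i.d. noise = random permutation per row; the first k columns
# are therefore k distinct expert ids drawn uniformly without replacement.
topk_ids = torch.argsort(random_matrix, dim=1)[:, :k]
print(topk_ids)  # each row: k distinct ids in [0, num_experts)
```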
```diff
@@ -259,9 +239,10 @@ class AscendW8A8DynamicFusedMoEMethod(AscendMoEScheme):
         w2 = [layer.w2_weight]
         w2_scale = [layer.w2_weight_scale]
 
-        fused_scale_flag = (get_forward_context().moe_comm_type
-                            == MoECommType.FUSED_MC2
-                            and envs_ascend.VLLM_ASCEND_ENABLE_FUSED_MC2 == 1)
+        fused_scale_flag = (
+            get_forward_context().moe_comm_type == MoECommType.FUSED_MC2
+            and envs_ascend.VLLM_ASCEND_ENABLE_FUSED_MC2 == 1
+        )
         final_hidden_states = moe_comm_method.fused_experts(
             hidden_states=x,
             pertoken_scale=pertoken_scale,
@@ -275,54 +256,35 @@ class AscendW8A8DynamicFusedMoEMethod(AscendMoEScheme):
             expert_map=expert_map,
             log2phy=log2phy,
             dynamic_eplb=self.dynamic_eplb,
-            mc2_mask=kwargs.get("mc2_mask", None))
+            mc2_mask=kwargs.get("mc2_mask"),
+        )
         if zero_expert_num > 0 and zero_expert_type is not None:
             final_hidden_states += zero_expert_result
         return final_hidden_states
 
     def process_weights_after_loading(self, layer):
-        layer.w13_weight.data = layer.w13_weight.data.transpose(
-            1, 2).contiguous()
-        layer.w2_weight.data = layer.w2_weight.data.transpose(1,
-                                                              2).contiguous()
+        layer.w13_weight.data = layer.w13_weight.data.transpose(1, 2).contiguous()
+        layer.w2_weight.data = layer.w2_weight.data.transpose(1, 2).contiguous()
         # TODO(zzzzwwjj): Currently, `torch_npu.npu_grouped_matmul_swiglu_quant`
         # can only support weight nz.
-        layer.w13_weight.data = torch_npu.npu_format_cast(
-            layer.w13_weight.data, ACL_FORMAT_FRACTAL_NZ)
-        layer.w2_weight.data = torch_npu.npu_format_cast(
-            layer.w2_weight.data, ACL_FORMAT_FRACTAL_NZ)
-        layer.w13_weight_scale.data = layer.w13_weight_scale.data.view(
-            layer.w13_weight_scale.data.shape[0], -1)
-        layer.w13_weight_scale_fp32 = layer.w13_weight_scale.data.to(
-            torch.float32)
-        layer.w13_weight_offset.data = layer.w13_weight_offset.data.view(
-            layer.w13_weight_offset.data.shape[0], -1)
-        layer.w2_weight_scale.data = layer.w2_weight_scale.data.view(
-            layer.w2_weight_scale.data.shape[0], -1)
-        layer.w2_weight_offset.data = layer.w2_weight_offset.data.view(
-            layer.w2_weight_offset.data.shape[0], -1)
+        layer.w13_weight.data = torch_npu.npu_format_cast(layer.w13_weight.data, ACL_FORMAT_FRACTAL_NZ)
+        layer.w2_weight.data = torch_npu.npu_format_cast(layer.w2_weight.data, ACL_FORMAT_FRACTAL_NZ)
+        layer.w13_weight_scale.data = layer.w13_weight_scale.data.view(layer.w13_weight_scale.data.shape[0], -1)
+        layer.w13_weight_scale_fp32 = layer.w13_weight_scale.data.to(torch.float32)
+        layer.w13_weight_offset.data = layer.w13_weight_offset.data.view(layer.w13_weight_offset.data.shape[0], -1)
+        layer.w2_weight_scale.data = layer.w2_weight_scale.data.view(layer.w2_weight_scale.data.shape[0], -1)
+        layer.w2_weight_offset.data = layer.w2_weight_offset.data.view(layer.w2_weight_offset.data.shape[0], -1)
 
-        layer.fused_w1_scale = scale_from_float_to_int64(
-            layer.w13_weight_scale.data)
-        layer.fused_w2_scale = scale_from_float_to_int64(
-            layer.w2_weight_scale.data)
+        layer.fused_w1_scale = scale_from_float_to_int64(layer.w13_weight_scale.data)
+        layer.fused_w2_scale = scale_from_float_to_int64(layer.w2_weight_scale.data)
 
         if self.dynamic_eplb:
-            layer.w13_weight_list = [
-                weight.clone()
-                for weight in layer.w13_weight.data.unbind(dim=0)
-            ]
-            layer.w2_weight_list = [
-                weight.clone() for weight in layer.w2_weight.data.unbind(dim=0)
-            ]
+            layer.w13_weight_list = [weight.clone() for weight in layer.w13_weight.data.unbind(dim=0)]
+            layer.w2_weight_list = [weight.clone() for weight in layer.w2_weight.data.unbind(dim=0)]
             layer.w13_weight_scale_fp32_list = [
-                weight.clone()
-                for weight in layer.w13_weight_scale_fp32.data.unbind(dim=0)
-            ]
-            layer.w2_weight_scale_list = [
-                weight.clone()
-                for weight in layer.w2_weight_scale.data.unbind(dim=0)
+                weight.clone() for weight in layer.w13_weight_scale_fp32.data.unbind(dim=0)
             ]
+            layer.w2_weight_scale_list = [weight.clone() for weight in layer.w2_weight_scale.data.unbind(dim=0)]
             del layer.w13_weight
             del layer.w2_weight
             del layer.w13_weight_scale
```
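`process_weights_after_loading` transposes each expert weight from `(experts, out, in)` to `(experts, in, out)`, casts it to the NZ on-device layout required by `npu_grouped_matmul_swiglu_quant`, and flattens the per-channel scales to 2-D. A device-agnostic sketch of the tensor bookkeeping (the `npu_format_cast` step is NPU-specific and omitted; sizes are illustrative):

```python
import torch

E, H, I = 8, 64, 128
w13 = torch.randint(-128, 128, (E, 2 * I, H), dtype=torch.int8)
w13_scale = torch.rand(E, 2 * I, 1)

w13 = w13.transpose(1, 2).contiguous()              # (E, H, 2*I): input dim first
w13_scale = w13_scale.view(w13_scale.shape[0], -1)  # (E, 2*I): drop the trailing 1
print(w13.shape, w13_scale.shape)  # torch.Size([8, 64, 256]) torch.Size([8, 256])
```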