### What this PR does / why we need it?
This PR introduces tensor model parallelism for the o_proj matrix to reduce memory consumption. It only supports graph mode in the pure-DP scenario.
On a DeepSeek R1 W8A8 PD-disaggregated decode instance running pure DP, setting oproj_tensor_parallel_size = 8 saves 5.8 GB of NPU memory per rank at the cost of roughly 1 ms higher TPOT. The best result was obtained with oproj_tensor_parallel_size = 4, which showed no TPOT increase.
Performance data:
<img width="1442" height="442" alt="image"
src="https://github.com/user-attachments/assets/83270fc5-868a-4387-b0a9-fac29b4a376d"
/>
### Does this PR introduce _any_ user-facing change?
This PR introduces one new config in `additional_config`.
| Name | Effect | Required | Type | Constraints |
| :--- | :--- | :--- | :--- | :--- |
| oproj_tensor_parallel_size | Splits the o_proj matrix along the row dimension (head num * head dim) into oproj_tensor_parallel_size pieces. | No | int | Default is None; setting any value enables the feature. head num * head dim must be divisible by this value. |
Example:
`--additional_config={"oproj_tensor_parallel_size": 8}`
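
To make the row split and the divisibility constraint concrete, here is a minimal sketch of what happens to the o_proj weight shapes. The values below are illustrative (roughly DeepSeek-R1-sized) and the snippet is not part of this PR:

```python
import torch

# Illustrative values only (not taken from this PR).
hidden_size = 7168
num_heads_times_head_dim = 16384
oproj_tensor_parallel_size = 4

# o_proj is a row-parallel layer, so its weight is stored as
# [output_size, input_size] and is split along the input
# (head num * head dim) dimension, which must divide evenly.
assert num_heads_times_head_dim % oproj_tensor_parallel_size == 0

full_weight = torch.empty(hidden_size, num_heads_times_head_dim)
per_rank_weight = torch.chunk(full_weight, oproj_tensor_parallel_size, dim=1)[0]
print(per_rank_weight.shape)  # torch.Size([7168, 4096])
```

Each rank in the o_proj TP group therefore holds only 1/oproj_tensor_parallel_size of the o_proj weight, which is where the per-rank memory saving comes from.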
### How was this patch tested?
- vLLM version: v0.10.1.1
- vLLM main: eddaafc1c7
---------
Signed-off-by: zzhx1 <zzh_201018@outlook.com>
Co-authored-by: zzh <zzh_201018@outlook.com>
```python
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

from types import MappingProxyType
from typing import Any, Callable, Dict, List, Mapping, Optional

import torch
from vllm.distributed import get_tensor_model_parallel_rank
from vllm.model_executor.layers.fused_moe import (FusedMoE, FusedMoEMethodBase,
                                                  FusedMoeWeightScaleSupported)
from vllm.model_executor.layers.linear import (LinearBase, LinearMethodBase,
                                               RowParallelLinear,
                                               UnquantizedLinearMethod)
from vllm.model_executor.layers.quantization import \
    register_quantization_config
from vllm.model_executor.layers.quantization.base_config import (
    QuantizationConfig, QuantizeMethodBase)
from vllm.model_executor.layers.quantization.kv_cache import BaseKVCacheMethod
from vllm.model_executor.layers.vocab_parallel_embedding import (
    UnquantizedEmbeddingMethod, VocabParallelEmbedding)
from vllm.model_executor.parameter import PerTensorScaleParameter
from vllm.model_executor.utils import set_weight_attrs

from vllm_ascend.distributed.parallel_state import (get_mlp_tp_group,
                                                    get_otp_group)
from vllm_ascend.ops.fused_moe import AscendUnquantizedFusedMoEMethod
from vllm_ascend.utils import (ASCEND_QUANTIZATION_METHOD, mlp_tp_enable,
                               oproj_tp_enable)

from .utils import get_quant_method

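# For reference, the quant_model_description.json consumed by AscendQuantConfig
# is a flat mapping from parameter names (plus a few global keys such as
# "fa_quant_type" and "kv_quant_type") to quantization tags. The snippet below
# is purely illustrative -- the layer names and the "W8A8" tag are assumptions,
# not taken from a real model; this file itself only relies on "FLOAT" marking
# an unquantized weight and on the two global keys:
#
#   {
#       "model.layers.0.self_attn.o_proj.weight": "W8A8",
#       "model.layers.0.mlp.gate_proj.weight": "FLOAT",
#       "kv_quant_type": "C8",
#       "fa_quant_type": null
#   }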

@register_quantization_config(ASCEND_QUANTIZATION_METHOD)
class AscendQuantConfig(QuantizationConfig):
    """Config class for Ascend

    This class is a general class that parses quantization configs
    that are supported on Ascend hardware.
    """

    def __init__(self, quant_config: Dict[str, Any]):
        self.quant_description = quant_config

    def __repr__(self) -> str:
        return "AscendQuantConfig:\n" + super().__repr__()

    @classmethod
    def get_name(cls) -> str:
        return ASCEND_QUANTIZATION_METHOD

    @classmethod
    def get_supported_act_dtypes(cls) -> List[torch.dtype]:
        return [torch.int8, torch.float16, torch.bfloat16]

    @classmethod
    def get_min_capability(cls) -> int:
        raise NotImplementedError(
            "Ascend hardware does not support the \"get_min_capability\" feature.")

    @classmethod
    def get_config_filenames(cls) -> List[str]:
        return ["quant_model_description.json"]

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "AscendQuantConfig":
        return cls(config)

    @classmethod
    def override_quantization_method(cls, hf_quant_cfg,
                                     user_quant) -> Optional[str]:
        if torch.npu.is_available():
            return ASCEND_QUANTIZATION_METHOD
        return None

    def get_quant_method(self, layer: torch.nn.Module,
                         prefix: str) -> Optional["QuantizeMethodBase"]:
        from vllm.attention.layer import Attention
        if isinstance(layer, LinearBase):
            if self.is_layer_skipped_ascend(prefix,
                                            self.packed_modules_mapping):
                return UnquantizedLinearMethod()
            return AscendLinearMethod(self, prefix,
                                      self.packed_modules_mapping)
        elif isinstance(layer, Attention) and \
                'fa_quant_type' in self.quant_description.keys() and \
                self.quant_description['fa_quant_type'] is not None:
            return AscendKVCacheMethod(self, prefix)
        elif isinstance(layer, Attention) and self.quant_description.get(
                'kv_quant_type') == 'C8':
            return AscendKVCacheMethod(self, prefix)
        elif isinstance(layer, FusedMoE):
            if self.is_layer_skipped_ascend(prefix,
                                            self.packed_modules_mapping):
                return AscendUnquantizedFusedMoEMethod(layer.moe_config)
            return AscendFusedMoEMethod(self, prefix,
                                        self.packed_modules_mapping)
        elif isinstance(layer, VocabParallelEmbedding):
            if self.is_layer_skipped_ascend(prefix,
                                            self.packed_modules_mapping):
                return UnquantizedEmbeddingMethod()
            return AscendEmbeddingMethod(self, prefix,
                                         self.packed_modules_mapping)
        return None

    def is_layer_skipped_ascend(
            self,
            prefix: str,
            fused_mapping: Mapping[str, List[str]] = MappingProxyType({})):
        # adapted from vllm.model_executor.layers.quantization.utils.quant_utils.is_layer_skipped
        proj_name = prefix.split(".")[-1]
        if proj_name in fused_mapping:
            shard_prefixes = [
                prefix.replace(proj_name, shard_proj_name)
                for shard_proj_name in fused_mapping[proj_name]
            ]

            is_skipped = None
            for shard_prefix in shard_prefixes:
                is_shard_skipped = self.quant_description[shard_prefix +
                                                          '.weight'] == "FLOAT"

                if is_skipped is None:
                    is_skipped = is_shard_skipped
                elif is_shard_skipped != is_skipped:
                    raise ValueError(
                        f"Detected some but not all shards of {prefix} "
                        "are quantized. All shards of fused layers "
                        "must have the same precision.")
        else:
            is_skipped = self.quant_description[prefix + '.weight'] == "FLOAT"

        assert is_skipped is not None
        return is_skipped

    def get_scaled_act_names(self) -> List[str]:
        return []

class AscendLinearMethod(LinearMethodBase):
    """Linear method for Ascend quantization.

    Args:
        quant_config: The Ascend quantization config.
    """

    def __init__(self, quant_config: AscendQuantConfig, prefix: str,
                 packed_modules_mapping: Dict[str, Any]) -> None:
        self.quant_method = get_quant_method(quant_config.quant_description,
                                             prefix, "linear",
                                             packed_modules_mapping)

    def create_weights(
            self,
            layer: torch.nn.Module,
            input_size_per_partition: int,
            output_partition_sizes: List[int],
            input_size: int,
            output_size: int,
            params_dtype: torch.dtype,
            **extra_weight_attrs,
    ) -> None:
        output_size_per_partition = sum(output_partition_sizes)
        weight_loader = extra_weight_attrs.get("weight_loader")

        weight_dict = self.quant_method.get_weight(input_size_per_partition,
                                                   output_size_per_partition,
                                                   params_dtype)
        for weight_name, weight_param in weight_dict.items():
            param = torch.nn.Parameter(weight_param, requires_grad=False)
            set_weight_attrs(param, {"input_dim": 1, "output_dim": 0})
            layer.register_parameter(weight_name, param)
            set_weight_attrs(param, extra_weight_attrs)

        pertensor_dict = self.quant_method.get_pertensor_param(params_dtype)
        for pertensor_name, pertensor_param in pertensor_dict.items():
            param = PerTensorScaleParameter(data=pertensor_param,
                                            weight_loader=weight_loader)
            # disable warning
            param.ignore_warning = True
            layer.register_parameter(pertensor_name, param)

        perchannel_dict = self.quant_method.get_perchannel_param(
            output_size_per_partition, params_dtype)
        for perchannel_name, perchannel_param in perchannel_dict.items():
            param = torch.nn.Parameter(perchannel_param, requires_grad=False)
            set_weight_attrs(param, {"output_dim": 0})
            layer.register_parameter(perchannel_name, param)
            set_weight_attrs(param, extra_weight_attrs)

        pergroup_dict = self.quant_method.get_pergroup_param(
            input_size_per_partition, output_size_per_partition, params_dtype)
        for pergroup_name, pergroup_param in pergroup_dict.items():
            param = torch.nn.Parameter(pergroup_param, requires_grad=False)
            set_weight_attrs(param, {"output_dim": 0})
            layer.register_parameter(pergroup_name, param)
            set_weight_attrs(param, extra_weight_attrs)
            if "weight_scale_second" in pergroup_name or "weight_offset_second" in pergroup_name:
                param.input_dim = 1

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        if hasattr(self.quant_method, "process_weights_after_loading"):
            self.quant_method.process_weights_after_loading(layer)

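    # apply() routes the tensor-parallel rank: when o_proj is sharded over the
    # dedicated o_proj TP group (oproj_tensor_parallel_size set, so
    # oproj_tp_enable() is true), or down_proj over the MLP TP group, the rank
    # used for weight/activation slicing comes from that group rather than the
    # default tensor-parallel group.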
    def apply(
            self,
            layer: torch.nn.Module,
            x: torch.Tensor,
            bias: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        if isinstance(layer, RowParallelLinear):
            if layer.prefix.find("o_proj") != -1 and oproj_tp_enable():
                tp_rank = get_otp_group().rank_in_group
            elif layer.prefix.find("down_proj") != -1 and mlp_tp_enable():
                tp_rank = get_mlp_tp_group().rank_in_group
            else:
                tp_rank = get_tensor_model_parallel_rank()
        else:
            tp_rank = 0
        return self.quant_method.apply(layer, x, bias, tp_rank)


class AscendKVCacheMethod(BaseKVCacheMethod):
    """KVCache method for Ascend quantization.

    Args:
        quant_config: The Ascend quantization config.
    """

    def __init__(self, quant_config: AscendQuantConfig, prefix: str) -> None:
        self.quant_method = get_quant_method(quant_config.quant_description,
                                             prefix, "attention")

    def create_weights(self, layer: torch.nn.Module) -> None:
        # Unlike the linear method, vLLM performs no weight processing or
        # slicing for attention layers, so the whole weight-creation process
        # is delegated to the specific quant method.
        self.quant_method.create_weights(layer)

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        if hasattr(self.quant_method, "process_weights_after_loading"):
            self.quant_method.process_weights_after_loading(layer)

    def apply(self, layer: torch.nn.Module, query: torch.Tensor,
              key: torch.Tensor, value: torch.Tensor, kv_cache, attn_metadata,
              attn_type, scale, output) -> torch.Tensor:
        return self.quant_method.apply(layer, query, key, value, kv_cache,
                                       attn_metadata, attn_type, scale, output)


class AscendFusedMoEMethod(FusedMoEMethodBase):
    """FusedMoE method for Ascend quantization.

    Args:
        quant_config: The Ascend quantization config.
    """

    def __init__(self, quant_config: AscendQuantConfig, prefix: str,
                 packed_modules_mapping: Dict[str, Any]):
        self.quant_method = get_quant_method(quant_config.quant_description,
                                             prefix, "moe",
                                             packed_modules_mapping)

    def create_weights(
            self,
            layer: torch.nn.Module,
            num_experts: int,
            hidden_size: int,
            intermediate_size_per_partition: int,
            params_dtype: torch.dtype,
            **extra_weight_attrs,
    ) -> None:
        weight_param = self.quant_method.get_weight(
            num_experts, intermediate_size_per_partition, hidden_size,
            params_dtype)
        for param_key, param_value in weight_param.items():
            param = torch.nn.Parameter(param_value, requires_grad=False)
            layer.register_parameter(param_key, param)
            set_weight_attrs(param, extra_weight_attrs)

        extra_weight_attrs.update(
            {"quant_method": FusedMoeWeightScaleSupported.CHANNEL.value})
        per_group_param = [
            "weight_scale_second", "weight_offset_second", "scale_bias"
        ]
        dynamic_quant_param = self.quant_method.get_dynamic_quant_param(
            num_experts, intermediate_size_per_partition, hidden_size,
            params_dtype)
        for param_key, param_value in dynamic_quant_param.items():
            param = torch.nn.Parameter(param_value, requires_grad=False)
            layer.register_parameter(param_key, param)
            set_weight_attrs(param, extra_weight_attrs)
            if any(fields in param_key for fields in per_group_param):
                setattr(param, "quant_method",
                        FusedMoeWeightScaleSupported.GROUP.value)

    def apply(
            self,
            layer: torch.nn.Module,
            x: torch.Tensor,
            router_logits: torch.Tensor,
            top_k: int,
            renormalize: bool,
            use_grouped_topk: bool = False,
            global_num_experts: int = -1,
            expert_map: Optional[torch.Tensor] = None,
            topk_group: Optional[int] = None,
            num_expert_group: Optional[int] = None,
            custom_routing_function: Optional[Callable] = None,
            scoring_func: str = "softmax",
            e_score_correction_bias: Optional[torch.Tensor] = None,
            is_prefill: bool = True,
            enable_force_load_balance: bool = False,
            log2phy: Optional[torch.Tensor] = None,
            global_redundant_expert_num=0,
            **kwargs,
    ) -> torch.Tensor:
        return self.quant_method.apply(
            layer, x, router_logits, top_k, renormalize, use_grouped_topk,
            global_num_experts, expert_map, topk_group, num_expert_group,
            custom_routing_function, scoring_func, e_score_correction_bias,
            is_prefill, enable_force_load_balance, log2phy,
            global_redundant_expert_num, **kwargs)

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        if hasattr(self.quant_method, "process_weights_after_loading"):
            self.quant_method.process_weights_after_loading(layer)


class AscendEmbeddingMethod(AscendLinearMethod):
    """Embedding method for Ascend quantization.

    Args:
        quant_config: The Ascend quantization config.
    """

    def __init__(self, quant_config: AscendQuantConfig, prefix: str,
                 packed_modules_mapping: Dict[str, Any]) -> None:
        self.quant_method = get_quant_method(quant_config.quant_description,
                                             prefix, "linear",
                                             packed_modules_mapping)
```