### What this PR does / why we need it?
**Scope of Changes**:
| File Path |
| :--- |
|`vllm_ascend/ops/layer_shard_linear.py`|
|`vllm_ascend/ops/linear.py`|
|`vllm_ascend/ops/linear_op.py`|
|`vllm_ascend/worker/worker.py`|
|`vllm_ascend/patch/worker/patch_bert.py`|
|`vllm_ascend/patch/worker/patch_deepseek.py`|
|`vllm_ascend/patch/worker/patch_distributed.py`|
|`vllm_ascend/patch/worker/patch_module.py`|
|`vllm_ascend/patch/worker/patch_multimodal_merge.py`|
|`vllm_ascend/patch/worker/patch_qwen3_next.py`|
|`vllm_ascend/patch/worker/patch_qwen3_next_mtp.py`|
|`vllm_ascend/patch/worker/patch_rejection_sampler.py`|
|`vllm_ascend/patch/worker/patch_rope.py`|
|`vllm_ascend/patch/worker/patch_triton.py`|
|`vllm_ascend/patch/worker/patch_unquantized_gemm.py`|
|`vllm_ascend/patch/worker/patch_v2_egale.py`|
|`vllm_ascend/worker/npu_input_batch.py`|
|`vllm_ascend/worker/v2/aclgraph_utils.py`|
|`vllm_ascend/worker/v2/attn_utils.py`|
|`vllm_ascend/worker/v2/model_runner.py`|
|`vllm_ascend/worker/v2/sample/gumbel.py`|
|`vllm_ascend/worker/v2/sample/penalties.py`|
|`vllm_ascend/worker/v2/sample/sampler.py`|
|`vllm_ascend/worker/v2/spec_decode/__init__.py`|
|`vllm_ascend/worker/v2/spec_decode/eagle.py`|
|`vllm_ascend/worker/v2/states.py`|
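
The central new piece is `vllm_ascend/ops/layer_shard_linear.py`, which shards the weights of a series of isomorphic linear layers across a parallel group (the i-th layer's weight lives on device i % n) and hides the cost of fetching them with asynchronous broadcasts. A minimal sketch of the intended call sequence, assuming a hypothetical list `layers` of registered linear modules and a dedicated `GroupCoordinator` named `shard_group` — only the function names below come from this PR, the surrounding scaffolding is illustrative:

```python
from vllm_ascend.ops.layer_shard_linear import (
    post_process_after_loading_for_shard_weight_series,
    reach_layer_for_shard_weight_series,
    register_layer_to_shard_weight_series,
)

# 1. While building the model, register every isomorphic layer of a series.
#    Each device then keeps only the weights of the layers it "owns".
for layer in layers:  # hypothetical list of AscendRowParallelLinear modules
    register_layer_to_shard_weight_series(
        series_name="o_proj",  # identifies which series the layer belongs to
        group=shard_group,     # group coordinator used for the async broadcasts
        layer=layer,
        prefetch_step=1,       # prefetch the weights one layer ahead
    )

# 2. After weight loading, finish initialization on any layer of the series.
post_process_after_loading_for_shard_weight_series(layers[0])

# 3. During execution, announce each layer as it is reached. This kicks off an
#    asynchronous broadcast for the weights of the k-th subsequent layer; the
#    wrapped forward() waits on the matching shard window before its matmul.
for layer in layers:
    reach_layer_for_shard_weight_series(layer)
    # ... layer.forward(...) runs here ...
```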
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.14.0
- vLLM main: d68209402d
Signed-off-by: MrZ20 <2609716663@qq.com>
Signed-off-by: SILONG ZENG <2609716663@qq.com>
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Co-authored-by: wangxiyuan <wangxiyuan1007@gmail.com>
@@ -1,6 +1,6 @@
from collections.abc import Callable
from dataclasses import dataclass
from functools import lru_cache
from typing import Callable, List, Optional

import torch
import torch.distributed as dist
@@ -17,39 +17,38 @@ def dispose_tensor(x: torch.Tensor):

@dataclass
class LayerMetadata:
"""Metadata for a layer.
"""
"""Metadata for a layer."""

layer_idx: int # The index of the layer.
layer: LinearBase # The layer object.
post_method: Callable[[
torch.nn.Module
], None] # The `process_weights_after_loading` method from the quant method.
post_method: Callable[[torch.nn.Module], None] # The `process_weights_after_loading` method from the quant method.
weight: torch.Tensor # The weight tensor.
window_idx: int # The index of the window.


@dataclass
class ShardWindowMetadata:
"""Metadata for a shard window.
"""
"""Metadata for a shard window."""

weight: torch.Tensor # The weight tensor to be shard by layers.
data_layer_idx: int # The index of the layer this window's weight is equal to.
work: Optional[torch.distributed.Work] # The asynchronous broadcast work.
work: torch.distributed.Work | None # The asynchronous broadcast work.


@dataclass
class SeriesMetadata:
"""Metadata for a weight shard series.
"""
"""Metadata for a weight shard series."""

group: GroupCoordinator
start_layer: int
end_layer: int
num_layers: int
prefetch_step: int
dummy_weight: torch.Tensor # Dummy weight to replace the loaded weight matrix. All the layers in the series share the same dummy weight tensor.
dummy_weight: torch.Tensor # Dummy weight to replace the loaded weight matrix.
# All the layers in the series share the same dummy weight tensor.
layers: list[LayerMetadata]
shard_windows: list[
ShardWindowMetadata] # Shard windows for prefetching. The window size is (`prefetch_step` + 1), as only the weights for the next (`prefetch_step` + 1) layers need to be stored.
shard_windows: list[ShardWindowMetadata] # Shard windows for prefetching. The window size is (`prefetch_step` + 1),
# as only the weights for the next (`prefetch_step` + 1) layers need to be stored.
window_offset: int # The index of the window for the next coming layer.

def is_source(self, layer_idx) -> bool:
@@ -63,9 +62,9 @@ class SeriesMetadata:
self.layers.sort(key=lambda x: x.layer_idx)
self.num_layers = len(self.layers)
assert self.num_layers > 0, "No layers in the series"
assert self.prefetch_step >= 0 and self.prefetch_step <= max(
0, self.num_layers -
2), "prefetch_step must be in [0, num_layers - 2]"
assert self.prefetch_step >= 0 and self.prefetch_step <= max(0, self.num_layers - 2), (
"prefetch_step must be in [0, num_layers - 2]"
)
self.start_layer = self.layers[0].layer_idx
self.end_layer = self.layers[-1].layer_idx + 1

@@ -73,25 +72,27 @@ class SeriesMetadata:
layer = self.layers[layer_idx - self.start_layer]
assert layer.layer_idx == layer_idx, "layer_idx must be consecutive"
is_source = self.is_source(layer_idx)
# If the weight uses dummy weight, make a copy temporary such that the post method call won't affect other layers which also uses dummy weight.
# If the weight uses dummy weight, make a copy temporary such that the post method call
# won't affect other layers which also uses dummy weight.
if not is_source:
layer.weight.set_(torch.empty_like(self.dummy_weight))
# Broadcast to get the true weight.
dist.broadcast(layer.weight,
src=self.group.ranks[layer_idx %
self.group.world_size],
group=self.group.device_group)
dist.broadcast(
layer.weight, src=self.group.ranks[layer_idx % self.group.world_size], group=self.group.device_group
)
# Call `process_weights_after_loading` from the quant method.
layer.post_method(layer.layer)
step = layer_idx - self.start_layer
if step < self.prefetch_step:
# Build the windows for the first `prefetch_step` layers. The weights can be used for the first `prefetch_step` layers in `forward()`, so also clone the weights.
# Build the windows for the first `prefetch_step` layers. The weights can be used
# for the first `prefetch_step` layers in `forward()`, so also clone the weights.
self.shard_windows.append(
ShardWindowMetadata(
weight=layer.weight.clone().detach(),
data_layer_idx=layer_idx,
work=None,
))
)
)
layer.window_idx = step
# When the layer not intended to be stored in this device, link to the corresponding window's tensor.
if not is_source:
@@ -104,7 +105,8 @@ class SeriesMetadata:
weight=torch.empty_like(layer.weight),
data_layer_idx=-1,
work=None,
))
)
)
# When the layer not intended to be stored in this device, dispose the tensor.
if not is_source:
dispose_tensor(layer.weight)
@@ -113,8 +115,7 @@ class SeriesMetadata:

def reach_layer(self, layer_idx: int):
# The index of the layer to be prefetched.
next_layer_idx = (layer_idx + self.prefetch_step
) % self.num_layers + self.start_layer
next_layer_idx = (layer_idx + self.prefetch_step) % self.num_layers + self.start_layer
next_layer = self.layers[next_layer_idx - self.start_layer]
# The index of the window to store the weight for the coming layer.
next_layer.window_idx = self.window_offset
@@ -123,8 +124,7 @@ class SeriesMetadata:
if not self.is_source(next_layer_idx):
next_layer.weight.set_(window.weight)
# Update `window_offset` by rolling one step.
self.window_offset = (self.window_offset + 1) % (self.prefetch_step +
1)
self.window_offset = (self.window_offset + 1) % (self.prefetch_step + 1)
assert window.data_layer_idx != next_layer_idx
window.data_layer_idx = next_layer_idx
# Start asynchronous broadcast work.
@@ -132,13 +132,13 @@ class SeriesMetadata:
next_layer.weight,
src=self.group.ranks[next_layer_idx % self.group.world_size],
group=self.group.device_group,
async_op=True)
async_op=True,
)

def wait_weight(self, layer_idx: int):
# Find the asynchronous broadcast work and wait for it.
assert self.shard_windows
window = self.shard_windows[self.layers[layer_idx -
self.start_layer].window_idx]
window = self.shard_windows[self.layers[layer_idx - self.start_layer].window_idx]
# Make sure the data in the corresponding shard window is for the current layer.
assert window.data_layer_idx == layer_idx
if window.work is not None:
@@ -148,8 +148,8 @@ class SeriesMetadata:

@dataclass
class LayerExternalMetadata:
"""External metadata for a layer.
"""
"""External metadata for a layer."""

series: SeriesMetadata
layer_idx: int

@@ -159,9 +159,7 @@ _series_dict: dict[str, SeriesMetadata] = {}
_layer_external_dict: dict[int, LayerExternalMetadata] = {}


def _create_forward_wrapper(forward: Callable, series: SeriesMetadata,
layer_idx: int) -> Callable:

def _create_forward_wrapper(forward: Callable, series: SeriesMetadata, layer_idx: int) -> Callable:
def wrapped_forward(*args, **kwargs):
# Wait for the weight.
series.wait_weight(layer_idx)
@@ -173,23 +171,32 @@ def _create_forward_wrapper(forward: Callable, series: SeriesMetadata,
"""
|
||||
Register linear layers into a shard storage series.
|
||||
|
||||
In a parallel group, each device stores a distinct, non-overlapping subset of layers from the series. All layers in a series must have the same structure (are isomorphic). The weight matrix for the i-th layer is stored on device (i % n), where n is the number of devices.
|
||||
In a parallel group, each device stores a distinct, non-overlapping subset of layers from the series.
|
||||
All layers in a series must have the same structure (are isomorphic). The weight matrix for the i-th layer
|
||||
is stored on device (i % n), where n is the number of devices.
|
||||
|
||||
After loading the model, you must call `post_process_after_loading_for_shard_weight_series(layer)` on any layer of this series to complete the initialization.
|
||||
After loading the model, you must call `post_process_after_loading_for_shard_weight_series(layer)`
|
||||
on any layer of this series to complete the initialization.
|
||||
|
||||
During execution, each time a new layer is reached, you must call `reach_layer_for_shard_weight_series(layer)` for that layer to prefetch the weights. The argument `prefetch_step` is a non-negative integer k that manages asynchronous weight prefetching. Each call to `reach_layer_for_shard_weight_series(current_layer)` method will trigger an asynchronous prefetch for the weights of the k-th subsequent layer after `current_layer` within the series.
|
||||
During execution, each time a new layer is reached, you must call `reach_layer_for_shard_weight_series(layer)`
|
||||
for that layer to prefetch the weights. The argument `prefetch_step` is a non-negative integer k that manages
|
||||
asynchronous weight prefetching. Each call to `reach_layer_for_shard_weight_series(current_layer)` method will
|
||||
trigger an asynchronous prefetch for the weights of the k-th subsequent layer after `current_layer` within the series.
|
||||
|
||||
Note: The layers are managed as a circular buffer. The index of the layer to prefetch is determined by the formula:
|
||||
- start_layer is the index of the first layer in the series (inclusive).
|
||||
- end_layer is the index of the last layer in the series (exclusive). Thus, the series includes all layers with indices in the range [start_layer, end_layer).
|
||||
- end_layer is the index of the last layer in the series (exclusive). Thus, the series includes all layers with
|
||||
indices in the range [start_layer, end_layer).
|
||||
- total_layers = end_layer - start_layer
|
||||
- prefetch_layer_idx = (layer_idx + prefetch_step) % total_layers + start_layer
|
||||
|
||||
To hold the weights for the current layer and the k prefetched layers, a pool of (k + 1) shard tensor buffers will be created for this series.
|
||||
To hold the weights for the current layer and the k prefetched layers, a pool of (k + 1) shard tensor buffers
|
||||
will be created for this series.
|
||||
|
||||
Arguments:
|
||||
series_name: This name identifies which series this layer belongs to.
|
||||
group: The group coordinator for handling asynchronous communications. It is recommended to create a new group coordinator for each new series.
|
||||
group: The group coordinator for handling asynchronous communications. It is recommended to create a new group
|
||||
coordinator for each new series.
|
||||
layer: The linear layer object to register.
|
||||
prefetch_step: An integer that manages asynchronous weight prefetching. Setting it to 0 or 1 can cover most cases.
|
||||
"""
|
||||
@@ -224,7 +231,8 @@ def register_layer_to_shard_weight_series(
|
||||
post_method=layer.quant_method.process_weights_after_loading,
|
||||
weight=layer.weight,
|
||||
window_idx=-1,
|
||||
))
|
||||
)
|
||||
)
|
||||
# Discard the original `process_weights_after_loading` method such that it won't be called by others.
|
||||
layer.quant_method.process_weights_after_loading = lambda layer: None
|
||||
# When the layer not intended to be stored in this device, dispose the tensor and skip weight loading.
|
||||
@@ -257,6 +265,7 @@ def wait_layer_for_shard_weight_series(layer: LinearBase):
|
||||
@lru_cache(maxsize=1)
|
||||
def get_current_model_num_hidden_layers() -> int:
|
||||
from vllm.config import get_current_vllm_config
|
||||
|
||||
vllm_config = get_current_vllm_config()
|
||||
return vllm_config.model_config.get_total_num_hidden_layers()
|
||||
|
||||
@@ -268,10 +277,11 @@ def is_hidden_layer(layer: LinearBase) -> bool:
|
||||
|
||||
|
||||
def register_all_layers_to_shard_weight_series(
|
||||
layer_sharding: List[LinearBase], ):
|
||||
for curr_layer in (layer_sharding or []):
|
||||
layer_sharding: list[LinearBase],
|
||||
):
|
||||
for curr_layer in layer_sharding or []:
|
||||
if is_hidden_layer(curr_layer):
|
||||
layer_name = curr_layer.prefix.split('.')[-1]
|
||||
layer_name = curr_layer.prefix.split(".")[-1]
|
||||
register_layer_to_shard_weight_series(
|
||||
series_name=layer_name,
|
||||
group=get_shard_weight_group(),
|
||||
|
||||
@@ -20,19 +20,23 @@ AscendMergedColumnParallelLinear, AscendMergedColumnParallelLinear,
AscendRowParallelLinear and AscendColumnParallelLinear.
"""

from typing import Optional, Union

import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from vllm.config import get_current_vllm_config
from vllm.distributed import divide
from vllm.model_executor.layers.linear import ( # noqa
WEIGHT_LOADER_V2_SUPPORTED, ColumnParallelLinear, LinearBase,
MergedColumnParallelLinear, QKVParallelLinear, QuantizeMethodBase,
ReplicatedLinear, RowParallelLinear, UnquantizedLinearMethod)
from vllm.model_executor.layers.quantization.base_config import \
QuantizationConfig
WEIGHT_LOADER_V2_SUPPORTED,
ColumnParallelLinear,
LinearBase,
MergedColumnParallelLinear,
QKVParallelLinear,
QuantizeMethodBase,
ReplicatedLinear,
RowParallelLinear,
UnquantizedLinearMethod,
)
from vllm.model_executor.layers.quantization.base_config import QuantizationConfig
from vllm.model_executor.utils import set_weight_attrs

from vllm_ascend.ops.linear_op import get_parallel_op, get_replicated_op
@@ -50,14 +54,13 @@ class AscendUnquantizedLinearMethod(UnquantizedLinearMethod):

# TODO(realliujiaxu): Remove this class after linear of vllm supports custom comm group
class AscendLinearBase(LinearBase):

def __init__(
self,
input_size: int,
output_size: int,
skip_bias_add: bool = False,
params_dtype: Optional[torch.dtype] = None,
quant_config: Optional[QuantizationConfig] = None,
params_dtype: torch.dtype | None = None,
quant_config: QuantizationConfig | None = None,
prefix: str = "",
*,
return_bias: bool = True,
@@ -75,11 +78,9 @@ class AscendLinearBase(LinearBase):
self.quant_config = quant_config
self.prefix = prefix
if quant_config is None:
self.quant_method: Optional[
QuantizeMethodBase] = AscendUnquantizedLinearMethod()
self.quant_method: QuantizeMethodBase | None = AscendUnquantizedLinearMethod()
else:
self.quant_method = quant_config.get_quant_method(self,
prefix=prefix)
self.quant_method = quant_config.get_quant_method(self, prefix=prefix)
self.return_bias = return_bias
self.disable_tp = disable_tp

@@ -100,11 +101,11 @@ class AscendQKVParallelLinear(QKVParallelLinear):
hidden_size: int,
head_size: int,
total_num_heads: int,
total_num_kv_heads: Optional[int] = None,
total_num_kv_heads: int | None = None,
bias: bool = True,
skip_bias_add: bool = False,
params_dtype: Optional[torch.dtype] = None,
quant_config: Optional[QuantizationConfig] = None,
params_dtype: torch.dtype | None = None,
quant_config: QuantizationConfig | None = None,
prefix: str = "",
*,
return_bias: bool = True,
@@ -112,9 +113,9 @@ class AscendQKVParallelLinear(QKVParallelLinear):
v_head_size: int | None = None,
):
self.v_head_size = v_head_size if v_head_size is not None else head_size
self.custom_op, _, tp_size = get_parallel_op(disable_tp, prefix, self,
"column")
# TODO(realliujiaxu): Replace the initialization code below with super().__init__ after linear of vllm supports custom comm group
self.custom_op, _, tp_size = get_parallel_op(disable_tp, prefix, self, "column")
# TODO(realliujiaxu): Replace the initialization code below with super().__init__ after
# linear of vllm supports custom comm group
self.hidden_size = hidden_size
self.head_size = head_size
self.total_num_heads = total_num_heads
@@ -125,35 +126,35 @@ class AscendQKVParallelLinear(QKVParallelLinear):
self.num_heads = divide(self.total_num_heads, tp_size)
if tp_size >= self.total_num_kv_heads:
self.num_kv_heads = 1
self.num_kv_head_replicas = divide(tp_size,
self.total_num_kv_heads)
self.num_kv_head_replicas = divide(tp_size, self.total_num_kv_heads)
else:
self.num_kv_heads = divide(self.total_num_kv_heads, tp_size)
self.num_kv_head_replicas = 1
input_size = self.hidden_size
output_size = (self.num_heads +
2 * self.num_kv_heads) * tp_size * self.head_size
output_size = (self.num_heads + 2 * self.num_kv_heads) * tp_size * self.head_size
self.output_sizes = [
self.num_heads * self.head_size * tp_size, # q_proj
self.num_kv_heads * self.head_size * tp_size, # k_proj
self.num_kv_heads * self.head_size * tp_size, # v_proj
]
AscendColumnParallelLinear.__init__(self,
input_size=input_size,
output_size=output_size,
bias=bias,
gather_output=False,
skip_bias_add=skip_bias_add,
params_dtype=params_dtype,
quant_config=quant_config,
prefix=prefix,
return_bias=return_bias,
disable_tp=disable_tp)
AscendColumnParallelLinear.__init__(
self,
input_size=input_size,
output_size=output_size,
bias=bias,
gather_output=False,
skip_bias_add=skip_bias_add,
params_dtype=params_dtype,
quant_config=quant_config,
prefix=prefix,
return_bias=return_bias,
disable_tp=disable_tp,
)

def forward(
self,
input_,
) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
) -> torch.Tensor | tuple[torch.Tensor, Parameter | None]:
if self.custom_op is not None:
return self.custom_op.apply(input_)

@@ -178,35 +179,36 @@ class AscendMergedColumnParallelLinear(MergedColumnParallelLinear):
bias: bool = True,
gather_output: bool = False,
skip_bias_add: bool = False,
params_dtype: Optional[torch.dtype] = None,
quant_config: Optional[QuantizationConfig] = None,
params_dtype: torch.dtype | None = None,
quant_config: QuantizationConfig | None = None,
prefix: str = "",
*,
return_bias: bool = True,
disable_tp: bool = False,
):
self.custom_op, self.tp_rank, self.tp_size = get_parallel_op(
disable_tp, prefix, self, "column")
# TODO(realliujiaxu): Replace the initialization code below with super().__init__ after linear of vllm supports custom comm group
self.custom_op, self.tp_rank, self.tp_size = get_parallel_op(disable_tp, prefix, self, "column")
# TODO(realliujiaxu): Replace the initialization code below with super().__init__ after
# linear of vllm supports custom comm group
self.output_sizes = output_sizes
assert all(output_size % self.tp_size == 0
for output_size in output_sizes)
AscendColumnParallelLinear.__init__(self,
input_size=input_size,
output_size=sum(output_sizes),
bias=bias,
gather_output=gather_output,
skip_bias_add=skip_bias_add,
params_dtype=params_dtype,
quant_config=quant_config,
prefix=prefix,
return_bias=return_bias,
disable_tp=disable_tp)
assert all(output_size % self.tp_size == 0 for output_size in output_sizes)
AscendColumnParallelLinear.__init__(
self,
input_size=input_size,
output_size=sum(output_sizes),
bias=bias,
gather_output=gather_output,
skip_bias_add=skip_bias_add,
params_dtype=params_dtype,
quant_config=quant_config,
prefix=prefix,
return_bias=return_bias,
disable_tp=disable_tp,
)

def forward(
self,
input_,
) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
) -> torch.Tensor | tuple[torch.Tensor, Parameter | None]:
if self.custom_op is not None:
return self.custom_op.apply(input_)

@@ -229,9 +231,9 @@ class AscendRowParallelLinear(RowParallelLinear):
bias: bool = True,
input_is_parallel: bool = True,
skip_bias_add: bool = False,
params_dtype: Optional[torch.dtype] = None,
params_dtype: torch.dtype | None = None,
reduce_results: bool = True,
quant_config: Optional[QuantizationConfig] = None,
quant_config: QuantizationConfig | None = None,
prefix: str = "",
*,
return_bias: bool = True,
@@ -247,23 +249,25 @@ class AscendRowParallelLinear(RowParallelLinear):
self.unique_prefix = unique_prefix
compilation_config.static_forward_context[unique_prefix] = self

self.custom_op, self.tp_rank, self.tp_size = get_parallel_op(
disable_tp, prefix, self, "row")
# TODO(realliujiaxu): Replace the initialization code below with super().__init__ after linear of vllm supports custom comm group
self.custom_op, self.tp_rank, self.tp_size = get_parallel_op(disable_tp, prefix, self, "row")
# TODO(realliujiaxu): Replace the initialization code below with super().__init__ after
# linear of vllm supports custom comm group
# Divide the weight matrix along the first dimension.
self.input_size_per_partition = divide(input_size, self.tp_size)
self.output_size_per_partition = output_size
self.output_partition_sizes = [output_size]

AscendLinearBase.__init__(self,
input_size,
output_size,
skip_bias_add,
params_dtype,
quant_config,
prefix,
return_bias=return_bias,
disable_tp=disable_tp)
AscendLinearBase.__init__(
self,
input_size,
output_size,
skip_bias_add,
params_dtype,
quant_config,
prefix,
return_bias=return_bias,
disable_tp=disable_tp,
)

self.input_is_parallel = input_is_parallel
self.reduce_results = reduce_results
@@ -277,19 +281,23 @@ class AscendRowParallelLinear(RowParallelLinear):
output_size=self.output_size,
params_dtype=self.params_dtype,
weight_loader=(
self.weight_loader_v2 if self.quant_method.__class__.__name__
in WEIGHT_LOADER_V2_SUPPORTED else self.weight_loader))
self.weight_loader_v2
if self.quant_method.__class__.__name__ in WEIGHT_LOADER_V2_SUPPORTED
else self.weight_loader
),
)
if not reduce_results and (bias and not skip_bias_add):
raise ValueError("When not reduce the results, adding bias to the "
"results can lead to incorrect results")
raise ValueError("When not reduce the results, adding bias to the results can lead to incorrect results")

if bias:
self.bias = Parameter(
torch.empty(self.output_size, dtype=params_dtype))
set_weight_attrs(self.bias, {
"output_dim": 0,
"weight_loader": self.weight_loader,
})
self.bias = Parameter(torch.empty(self.output_size, dtype=params_dtype))
set_weight_attrs(
self.bias,
{
"output_dim": 0,
"weight_loader": self.weight_loader,
},
)
else:
self.register_parameter("bias", None)

@@ -300,7 +308,7 @@ class AscendRowParallelLinear(RowParallelLinear):
self,
input_,
**kwargs,
) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
) -> torch.Tensor | tuple[torch.Tensor, Parameter | None]:
if self.custom_op is not None:
return self.custom_op.apply(input_)

@@ -321,36 +329,36 @@ class AscendColumnParallelLinear(ColumnParallelLinear):
bias: bool = True,
gather_output: bool = False,
skip_bias_add: bool = False,
params_dtype: Optional[torch.dtype] = None,
quant_config: Optional[QuantizationConfig] = None,
output_sizes: Optional[list[int]] = None,
params_dtype: torch.dtype | None = None,
quant_config: QuantizationConfig | None = None,
output_sizes: list[int] | None = None,
prefix: str = "",
*,
return_bias: bool = True,
disable_tp: bool = False,
):
self.custom_op, self.tp_rank, self.tp_size = get_parallel_op(
disable_tp, prefix, self, "column")
# TODO(realliujiaxu): Replace the initialization code below with super().__init__ after linear of vllm supports custom comm group
#
self.custom_op, self.tp_rank, self.tp_size = get_parallel_op(disable_tp, prefix, self, "column")
# TODO(realliujiaxu): Replace the initialization code below with super().__init__ after
# linear of vllm supports custom comm group
self.input_size_per_partition = input_size
self.output_size_per_partition = divide(output_size, self.tp_size)
self.output_partition_sizes = [self.output_size_per_partition]
# If QKV or MergedColumn, use output size of each partition.
if hasattr(self, "output_sizes"):
self.output_partition_sizes = [
divide(output_size, self.tp_size)
for output_size in self.output_sizes
]
self.output_partition_sizes = [divide(output_size, self.tp_size) for output_size in self.output_sizes]

AscendLinearBase.__init__(self,
input_size,
output_size,
skip_bias_add,
params_dtype,
quant_config,
prefix,
return_bias=return_bias,
disable_tp=disable_tp)
AscendLinearBase.__init__(
self,
input_size,
output_size,
skip_bias_add,
params_dtype,
quant_config,
prefix,
return_bias=return_bias,
disable_tp=disable_tp,
)

self.gather_output = gather_output

@@ -366,16 +374,20 @@ class AscendColumnParallelLinear(ColumnParallelLinear):
output_size=self.output_size,
params_dtype=self.params_dtype,
weight_loader=(
self.weight_loader_v2 if self.quant_method.__class__.__name__
in WEIGHT_LOADER_V2_SUPPORTED else self.weight_loader))
self.weight_loader_v2
if self.quant_method.__class__.__name__ in WEIGHT_LOADER_V2_SUPPORTED
else self.weight_loader
),
)
if bias:
self.bias = Parameter(
torch.empty(self.output_size_per_partition,
dtype=params_dtype))
set_weight_attrs(self.bias, {
"output_dim": 0,
"weight_loader": self.weight_loader,
})
self.bias = Parameter(torch.empty(self.output_size_per_partition, dtype=params_dtype))
set_weight_attrs(
self.bias,
{
"output_dim": 0,
"weight_loader": self.weight_loader,
},
)
else:
self.register_parameter("bias", None)

@@ -385,7 +397,7 @@ class AscendColumnParallelLinear(ColumnParallelLinear):
def forward(
self,
input_,
) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
) -> torch.Tensor | tuple[torch.Tensor, Parameter | None]:
if self.custom_op is not None:
return self.custom_op.apply(input_)

@@ -414,8 +426,8 @@ class AscendReplicatedLinear(ReplicatedLinear):
output_size: int,
bias: bool = True,
skip_bias_add: bool = False,
params_dtype: Optional[torch.dtype] = None,
quant_config: Optional[QuantizationConfig] = None,
params_dtype: torch.dtype | None = None,
quant_config: QuantizationConfig | None = None,
prefix: str = "",
*,
return_bias: bool = True,
@@ -428,32 +440,39 @@ class AscendReplicatedLinear(ReplicatedLinear):
else:
self.output_partition_sizes = [output_size]

AscendLinearBase.__init__(self,
input_size,
output_size,
skip_bias_add,
params_dtype,
quant_config,
prefix=prefix,
return_bias=return_bias,
disable_tp=disable_tp)
AscendLinearBase.__init__(
self,
input_size,
output_size,
skip_bias_add,
params_dtype,
quant_config,
prefix=prefix,
return_bias=return_bias,
disable_tp=disable_tp,
)

# All the linear layer supports quant method.
assert self.quant_method is not None
self.quant_method.create_weights(self,
self.input_size, [self.output_size],
self.input_size,
self.output_size,
self.params_dtype,
weight_loader=self.weight_loader)
self.quant_method.create_weights(
self,
self.input_size,
[self.output_size],
self.input_size,
self.output_size,
self.params_dtype,
weight_loader=self.weight_loader,
)

if bias:
self.bias = Parameter(
torch.empty(self.output_size, dtype=self.params_dtype))
set_weight_attrs(self.bias, {
"output_dim": 0,
"weight_loader": self.weight_loader,
})
self.bias = Parameter(torch.empty(self.output_size, dtype=self.params_dtype))
set_weight_attrs(
self.bias,
{
"output_dim": 0,
"weight_loader": self.weight_loader,
},
)
else:
self.register_parameter("bias", None)

@@ -463,7 +482,7 @@ class AscendReplicatedLinear(ReplicatedLinear):
def forward(
self,
input_,
) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
) -> torch.Tensor | tuple[torch.Tensor, Parameter | None]:
if self.custom_op is not None:
return self.custom_op.apply(input_)


@@ -31,16 +31,18 @@ CustomLinearOp
└── CustomReplicatedOp
How to extend a new linear op? Taking column parallel op as an example:
1. Inherit from CustomColumnParallelOp and create a new class MyColumnParallelOp
2. [Optional] The default communication group is the TP group. If a custom communication group is needed, override the comm_group method
2. [Optional] The default communication group is the TP group. If a custom communication group is needed,
override the comm_group method
3. Override the apply method according to requirements, which will replace the original linear.forward
4. Add selection logic for MyColumnParallelOp in the get_column_parallel_op method, typically based on prefix and configuration judgments
Row parallel op follows a similar approach - inherit from RowColumnParallelOp and register the new class in get_row_parallel_op.
4. Add selection logic for MyColumnParallelOp in the get_column_parallel_op method, typically based on
prefix and configuration judgments
Row parallel op follows a similar approach - inherit from RowColumnParallelOp and register the new class in
get_row_parallel_op.
"""

import re
from functools import lru_cache
from types import SimpleNamespace
from typing import Optional, Union

import torch
import torch.distributed as dist
@@ -49,27 +51,37 @@ import torch_npu
from torch import nn
from torch.distributed import ProcessGroup
from torch.nn.parameter import Parameter
from vllm.distributed import (split_tensor_along_last_dim,
tensor_model_parallel_all_reduce,
tensor_model_parallel_reduce_scatter)
from vllm.distributed import (
split_tensor_along_last_dim,
tensor_model_parallel_all_reduce,
tensor_model_parallel_reduce_scatter,
)
from vllm.distributed.parallel_state import get_tp_group
from vllm.forward_context import get_forward_context

from vllm_ascend import envs as envs_ascend
from vllm_ascend.ascend_config import get_ascend_config
from vllm_ascend.distributed.parallel_state import (get_flashcomm2_odp_group,
get_flashcomm2_otp_group,
get_mlp_tp_group,
get_otp_group)
from vllm_ascend.distributed.parallel_state import (
get_flashcomm2_odp_group,
get_flashcomm2_otp_group,
get_mlp_tp_group,
get_otp_group,
)
from vllm_ascend.ops.flashcomm2_oshard_manager import flashcomm2_oshard_manager
from vllm_ascend.utils import (enable_dsa_cp, enable_dsa_cp_with_layer_shard, enable_sp, flashcomm2_enable,
get_flashcomm2_reorgnized_batch_ids,
matmul_allreduce_enable, mlp_tp_enable,
oproj_tp_enable, shared_expert_dp_enabled,
get_weight_prefetch_method)
from vllm_ascend.utils import (
enable_dsa_cp,
enable_dsa_cp_with_layer_shard,
enable_sp,
flashcomm2_enable,
get_flashcomm2_reorgnized_batch_ids,
get_weight_prefetch_method,
matmul_allreduce_enable,
mlp_tp_enable,
oproj_tp_enable,
shared_expert_dp_enabled,
)


class CustomLinearOp:

def __init__(self, layer):
self.layer = layer
self.bias = None
@@ -112,7 +124,6 @@ class CustomLinearOp:


class CustomColumnParallelOp(CustomLinearOp):

def __init__(self, layer):
super().__init__(layer)
self.gather_output = None
@@ -123,7 +134,6 @@ class CustomColumnParallelOp(CustomLinearOp):


class CustomRowParallelOp(CustomLinearOp):

def __init__(self, layer):
super().__init__(layer)
self.reduce_results = None
@@ -140,7 +150,9 @@ class CustomRowParallelOp(CustomLinearOp):
output, output_bias = self.apply_impl(input_)
weight_prefetch_method = get_weight_prefetch_method()
if weight_prefetch_method:
weight_prefetch_method.maybe_prefetch_mlp_weight_preprocess(weight_prefetch_method.MLP_GATE_UP, output, self.prefix)
weight_prefetch_method.maybe_prefetch_mlp_weight_preprocess(
weight_prefetch_method.MLP_GATE_UP, output, self.prefix
)

if not self.return_bias:
return output
@@ -148,7 +160,6 @@ class CustomRowParallelOp(CustomLinearOp):


class CustomReplicatedOp(CustomLinearOp):

def apply_impl(self, input_):
bias = self.bias if not self.skip_bias_add else None
assert self.quant_method is not None
@@ -160,7 +171,6 @@ class CustomReplicatedOp(CustomLinearOp):


class MLPColumnParallelOp(CustomColumnParallelOp):

def __init__(self, layer):
super().__init__(layer)

@@ -171,7 +181,7 @@ class MLPColumnParallelOp(CustomColumnParallelOp):
def apply_impl(
self,
input_: torch.Tensor,
) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
) -> torch.Tensor | tuple[torch.Tensor, Parameter | None]:
bias = self.bias if not self.skip_bias_add else None
# Matrix multiply.
assert self.quant_method is not None
@@ -183,7 +193,6 @@ class MLPColumnParallelOp(CustomColumnParallelOp):


class MLPRowParallelOp(CustomRowParallelOp):

def __init__(self, layer):
super().__init__(layer)

@@ -191,22 +200,16 @@ class MLPRowParallelOp(CustomRowParallelOp):
def comm_group(self):
return get_mlp_tp_group()

def apply_impl(
self, input_: torch.Tensor
) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
def apply_impl(self, input_: torch.Tensor) -> torch.Tensor | tuple[torch.Tensor, Parameter | None]:
if self.input_is_parallel:
input_parallel = input_
else:
splitted_input = split_tensor_along_last_dim(
input_, num_partitions=self.tp_size)
splitted_input = split_tensor_along_last_dim(input_, num_partitions=self.tp_size)
input_parallel = splitted_input[self.tp_rank].contiguous()

assert self.quant_method is not None
bias_ = None if (self.tp_rank > 0
or self.skip_bias_add) else self.layer.bias
output_parallel = self.quant_method.apply(self.layer,
input_parallel,
bias=bias_)
bias_ = None if (self.tp_rank > 0 or self.skip_bias_add) else self.layer.bias
output_parallel = self.quant_method.apply(self.layer, input_parallel, bias=bias_)
output = self.comm_group.reduce_scatter(output_parallel, 0)

output_bias = self.bias if self.skip_bias_add else None
@@ -214,7 +217,6 @@ class MLPRowParallelOp(CustomRowParallelOp):


class OProjRowParallelOp(CustomRowParallelOp):

def __init__(self, layer):
super().__init__(layer)

@@ -225,13 +227,11 @@ class OProjRowParallelOp(CustomRowParallelOp):
def apply_impl(
self,
input_: torch.Tensor,
) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:

) -> torch.Tensor | tuple[torch.Tensor, Parameter | None]:
if self.input_is_parallel:
input_parallel = input_
else:
splitted_input = split_tensor_along_last_dim(
input_, num_partitions=self.tp_size)
splitted_input = split_tensor_along_last_dim(input_, num_partitions=self.tp_size)
input_parallel = splitted_input[self.tp_rank].contiguous()

# Prepare tensors for all-to-all communication
@@ -241,27 +241,19 @@ class OProjRowParallelOp(CustomRowParallelOp):

# Reshape tensor for efficient cross-device transfer:
# [batch, dim] -> [tp_size, batch, chunk] -> flattened
send_buf = (input_parallel.reshape(-1,
self.tp_size, chunk_size).transpose(
0, 1).contiguous().view(-1))
send_buf = input_parallel.reshape(-1, self.tp_size, chunk_size).transpose(0, 1).contiguous().view(-1)

# Create receive buffer
recv_buf = torch.empty(total_batch_size * chunk_size,
dtype=input_parallel.dtype,
device=input_parallel.device)
recv_buf = torch.empty(total_batch_size * chunk_size, dtype=input_parallel.dtype, device=input_parallel.device)

# Perform all-to-all communication
dist.all_to_all_single(recv_buf,
send_buf,
group=self.comm_group.device_group)
dist.all_to_all_single(recv_buf, send_buf, group=self.comm_group.device_group)
input_parallel = recv_buf.view(total_batch_size, chunk_size)

# Only fuse bias add for rank 0 to avoid duplicate bias addition in TP>1
bias_ = None if (self.tp_rank > 0 or self.skip_bias_add) else self.bias
assert self.quant_method is not None
output_parallel = self.quant_method.apply(self.layer,
input_parallel,
bias=bias_)
output_parallel = self.quant_method.apply(self.layer, input_parallel, bias=bias_)

# otp-specific: Combine partial results across devices
output = self.comm_group.reduce_scatter(output_parallel, dim=0)
@@ -278,14 +270,12 @@ class OProjRowParallelOp(CustomRowParallelOp):


class Flashcomm2OProjRowParallelOp(CustomRowParallelOp):

def __init__(self, layer):
super().__init__(layer)
self.odp_group = get_flashcomm2_odp_group()
self.odp_size = self.odp_group.world_size
self.otp_size = get_ascend_config().flashcomm2_oproj_tensor_parallel_size
self.reorgnized_batch_ids = get_flashcomm2_reorgnized_batch_ids(
get_tp_group().world_size)
self.reorgnized_batch_ids = get_flashcomm2_reorgnized_batch_ids(get_tp_group().world_size)
self.group_indices = torch.tensor(self.reorgnized_batch_ids).npu()
self.layer._quant_comm_config = {}

@@ -308,32 +298,28 @@ class Flashcomm2OProjRowParallelOp(CustomRowParallelOp):
def apply_impl(
self,
input_: torch.Tensor,
) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
) -> torch.Tensor | tuple[torch.Tensor, Parameter | None]:
"""Linear layer for Flashcomm2.
Input.ahspe = [batchsize*seqlength, headnum*headdim/TP]
Output.shape = [(batchsize*seqlength+padsize)/TP, hiddensize]
Input.ahspe = [batchsize*seqlength, headnum*headdim/TP]
Output.shape = [(batchsize*seqlength+padsize)/TP, hiddensize]
"""
# Handle input parallelism - split or use as-is
if self.input_is_parallel:
input_parallel = input_
else:
tp_rank = self.tp_rank
splitted_input = split_tensor_along_last_dim(
input_, num_partitions=self.tp_size)
splitted_input = split_tensor_along_last_dim(input_, num_partitions=self.tp_size)
input_parallel = splitted_input[tp_rank].contiguous()

# padding for all-to-all
forward_context = get_forward_context()
num_padding_tokens = forward_context.pad_size
if num_padding_tokens > 0:
input_parallel = nn.functional.pad(input_parallel,
(0, 0, 0, num_padding_tokens))
input_parallel = nn.functional.pad(input_parallel, (0, 0, 0, num_padding_tokens))

def otp_maybe_quant_comm(x):

# Reorganize the tensor so that the batch id and rank id correspond to each other.
chunk_num = len(self.reorgnized_batch_ids) * len(
self.reorgnized_batch_ids[0])
chunk_num = len(self.reorgnized_batch_ids) * len(self.reorgnized_batch_ids[0])
batch_size = x.size(0)

assert batch_size % chunk_num == 0, f"Batch_size({batch_size}) must be divisible by chunk_num({chunk_num})"
@@ -352,26 +338,19 @@ class Flashcomm2OProjRowParallelOp(CustomRowParallelOp):
total_intermediate_size = local_intermediate_size * all2all_tp_size

# Create receive buffer
recv_buf = torch.empty(total_intermediate_size * chunk_size,
dtype=x.dtype,
device=x.device)
recv_buf = torch.empty(total_intermediate_size * chunk_size, dtype=x.dtype, device=x.device)

# Perform all-to-all communication
dist.all_to_all_single(recv_buf,
send_buf,
group=self.odp_group.device_group)
dist.all_to_all_single(recv_buf, send_buf, group=self.odp_group.device_group)

return recv_buf.view(all2all_tp_size, chunk_size,
-1).transpose(0, 1).reshape(chunk_size, -1)
return recv_buf.view(all2all_tp_size, chunk_size, -1).transpose(0, 1).reshape(chunk_size, -1)

if not hasattr(self, "_quant_comm_config"):
self.layer._quant_comm_config = {}
self.layer._quant_comm_config[
"communication_fn"] = otp_maybe_quant_comm
actual_quant_method = getattr(self.quant_method, 'quant_method',
self.quant_method)
from vllm_ascend.quantization.methods.w8a8_static import \
AscendW8A8LinearMethod
self.layer._quant_comm_config["communication_fn"] = otp_maybe_quant_comm
actual_quant_method = getattr(self.quant_method, "quant_method", self.quant_method)
from vllm_ascend.quantization.methods.w8a8_static import AscendW8A8LinearMethod

if not isinstance(actual_quant_method, AscendW8A8LinearMethod):
# Check if w8a8 quantization is enabled. If not, communicate immediately.
input_parallel = otp_maybe_quant_comm(input_parallel)
@@ -382,9 +361,7 @@ class Flashcomm2OProjRowParallelOp(CustomRowParallelOp):
# bias will not get added more than once in TP>1 case)
bias_ = None if (self.tp_rank > 0 or self.skip_bias_add) else self.bias

output_parallel = self.quant_method.apply(self.layer,
input_parallel,
bias=bias_)
output_parallel = self.quant_method.apply(self.layer, input_parallel, bias=bias_)
# output_parallel shape: [bs/(TP/flashcomm2_otp_size), hiddenstate]
if self.tp_size > 1:
# flashcomm2 with reduce-scatter
@@ -408,8 +385,7 @@ class Flashcomm2OProjRowParallelOp(CustomRowParallelOp):
self.input_is_parallel = self.layer.input_is_parallel
self.input_size_per_partition = self.layer.input_size_per_partition
if flashcomm2_oshard_manager.flashcomm2_oshard_enable():
flashcomm2_oshard_manager.register_layer(self.layer,
prefetch_step=1)
flashcomm2_oshard_manager.register_layer(self.layer, prefetch_step=1)


class MatmulAllreduceRowParallelOp(CustomRowParallelOp):
@@ -419,28 +395,22 @@ class MatmulAllreduceRowParallelOp(CustomRowParallelOp):
super().__init__(layer)
self.hcomm_info = self.get_hcomm_info(self.comm_group.device_group)

def apply_impl(
self, input_: torch.Tensor
) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
def apply_impl(self, input_: torch.Tensor) -> torch.Tensor | tuple[torch.Tensor, Parameter | None]:
if self.input_is_parallel:
input_parallel = input_
else:
splitted_input = split_tensor_along_last_dim(
input_, num_partitions=self.tp_size)
splitted_input = split_tensor_along_last_dim(input_, num_partitions=self.tp_size)
input_parallel = splitted_input[self.tp_rank].contiguous()
"""Calculate the output tensor of forward by considering
fusing communication and computation."""
bias_ = None if (self.tp_rank > 0 or self.skip_bias_add) else self.bias
if self.reduce_results and self.tp_size > 1:
output = torch_npu.npu_mm_all_reduce_base(input_parallel,
self.layer.weight.t(),
self.hcomm_info,
bias=bias_)
output = torch_npu.npu_mm_all_reduce_base(
input_parallel, self.layer.weight.t(), self.hcomm_info, bias=bias_
)
else:
assert self.quant_method is not None
output = self.quant_method.apply(self.layer,
input_parallel,
bias=bias_)
output = self.quant_method.apply(self.layer, input_parallel, bias=bias_)

output_bias = self.bias if self.skip_bias_add else None
return output, output_bias
@@ -454,18 +424,14 @@ class MatmulAllreduceRowParallelOp(CustomRowParallelOp):
rank = torch.distributed.get_rank(group)
if torch.__version__ > "2.0":
global_rank = torch.distributed.get_global_rank(group, rank)
cls._HCOMM_INFO = group._get_backend(
torch.device("npu")).get_hccl_comm_name(global_rank)
cls._HCOMM_INFO = group._get_backend(torch.device("npu")).get_hccl_comm_name(global_rank)
else:
cls._HCOMM_INFO = group.get_hccl_comm_name(rank)
return cls._HCOMM_INFO


class SequenceColumnParallelOp(CustomColumnParallelOp):

def apply_impl(
self, input_: torch.Tensor
) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
def apply_impl(self, input_: torch.Tensor) -> torch.Tensor | tuple[torch.Tensor, Parameter | None]:
"""Linear layer with column parallelism.

Implemented multiple optimization projects for dense models, such as FlashComm and
@@ -490,13 +456,10 @@ class SequenceColumnParallelOp(CustomColumnParallelOp):


class Flashcomm2OshardQKVParallelOp(CustomColumnParallelOp):

def __init__(self, layer):
super().__init__(layer)

def apply_impl(
self, input_: torch.Tensor
) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
def apply_impl(self, input_: torch.Tensor) -> torch.Tensor | tuple[torch.Tensor, Parameter | None]:
"""Column-parallel linear with FlashComm2 OShard optimization."""

bias = self.bias if not self.skip_bias_add else None
@@ -505,12 +468,10 @@ class Flashcomm2OshardQKVParallelOp(CustomColumnParallelOp):
assert self.quant_method is not None

if enable_sp():
input_ = torch.ops.vllm.maybe_all_gather_and_maybe_unpad(
input_, True)
input_ = torch.ops.vllm.maybe_all_gather_and_maybe_unpad(input_, True)

# Trigger async broadcast before matmul to overlap communication.
flashcomm2_oshard_manager.trigger_broadcast_for_layer(
self.layer.prefix)
flashcomm2_oshard_manager.trigger_broadcast_for_layer(self.layer.prefix)

output_parallel = self.quant_method.apply(self.layer, input_, bias)
if self.gather_output and self.tp_size > 1:
@@ -523,14 +484,11 @@ class Flashcomm2OshardQKVParallelOp(CustomColumnParallelOp):


class SequenceRowParallelOp(CustomRowParallelOp):

def __init__(self, layer):
super().__init__(layer)
self.unique_prefix = None

def apply_impl(
self, input_: torch.Tensor
) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
def apply_impl(self, input_: torch.Tensor) -> torch.Tensor | tuple[torch.Tensor, Parameter | None]:
"""Linear layer with column parallelism.

Implemented multiple optimization projects for dense models, such as FlashComm and
@@ -540,26 +498,21 @@ class SequenceRowParallelOp(CustomRowParallelOp):
if self.input_is_parallel:
input_parallel = input_
else:
splitted_input = split_tensor_along_last_dim(
input_, num_partitions=self.tp_size)
splitted_input = split_tensor_along_last_dim(input_, num_partitions=self.tp_size)
input_parallel = splitted_input[self.tp_rank].contiguous()

assert self.quant_method is not None
bias_ = None if (self.tp_rank > 0 or self.skip_bias_add) else self.bias

if self.tp_size == 1 or not self.reduce_results:
output = self.quant_method.apply(self.layer,
input_parallel,
bias=bias_)
output = self.quant_method.apply(self.layer, input_parallel, bias=bias_)
else:
output = torch.ops.vllm.matmul_and_reduce(input_parallel,
self.unique_prefix)
output = torch.ops.vllm.matmul_and_reduce(input_parallel, self.unique_prefix)

output_bias = self.bias if self.skip_bias_add else None
return output, output_bias

def matmul_and_reduce(self, input_parallel: torch.Tensor,
bias_: Optional[Parameter]) -> torch.Tensor:
def matmul_and_reduce(self, input_parallel: torch.Tensor, bias_: Parameter | None) -> torch.Tensor:
assert self.quant_method is not None
try:
forward_context = get_forward_context()
@@ -572,29 +525,24 @@ class SequenceRowParallelOp(CustomRowParallelOp):
x = input_parallel

if not sp_enabled:
output_parallel = self.layer.quant_method.apply(self.layer,
x,
bias=bias_)
output_parallel = self.layer.quant_method.apply(self.layer, x, bias=bias_)
return tensor_model_parallel_all_reduce(output_parallel)

pad_size = forward_context.pad_size
if pad_size > 0 and not (enable_dsa_cp()
and "o_proj" in self.layer.prefix):
if pad_size > 0 and not (enable_dsa_cp() and "o_proj" in self.layer.prefix):
x = F.pad(x, (0, 0, 0, pad_size))

world_size = self.layer.tp_size
comm_mode = "aiv"
hcom_name = get_tp_group().device_group._get_backend(
torch.device('npu')).get_hccl_comm_name(self.layer.tp_rank)
hcom_name = get_tp_group().device_group._get_backend(torch.device("npu")).get_hccl_comm_name(self.layer.tp_rank)

from vllm.model_executor.layers.linear import UnquantizedLinearMethod

from vllm_ascend.quantization.methods import AscendW8A8LinearMethod
from vllm_ascend.quantization.method_adapters import AscendLinearMethod
from vllm_ascend.quantization.methods import AscendW8A8LinearMethod

# For unquant
if mmrs_fusion and isinstance(self.layer.quant_method,
UnquantizedLinearMethod):
if mmrs_fusion and isinstance(self.layer.quant_method, UnquantizedLinearMethod):
output = torch_npu.npu_mm_reduce_scatter_base(
x,
self.layer.weight.t(),
@@ -603,19 +551,22 @@ class SequenceRowParallelOp(CustomRowParallelOp):
reduce_op="sum",
bias=None,
comm_turn=0,
comm_mode=comm_mode)
comm_mode=comm_mode,
)
if bias_ is not None:
output.add_(bias_)
# For w8a8 quant
elif mmrs_fusion and (
isinstance(self.layer.quant_method, AscendLinearMethod)
and isinstance(self.layer.quant_method.quant_method,
AscendW8A8LinearMethod)):
isinstance(self.layer.quant_method, AscendLinearMethod)
and isinstance(self.layer.quant_method.quant_method, AscendW8A8LinearMethod)
):
if x.dtype != torch.int8:
x_quant = torch.ops.vllm.quantize(
x, self.layer.aclnn_input_scale,
x,
self.layer.aclnn_input_scale,
self.layer.aclnn_input_scale_reciprocal,
self.layer.aclnn_input_offset)
self.layer.aclnn_input_offset,
)
else:
x_quant = x
quant_bias = self.layer.quant_bias
@@ -631,14 +582,11 @@ class SequenceRowParallelOp(CustomRowParallelOp):
comm_turn=0,
x2_scale=deq_scale,
output_dtype=output_dtype,
comm_mode=comm_mode)
output = torch.add(
output,
torch.mul(quant_bias, deq_scale).to(self.layer.params_dtype))
comm_mode=comm_mode,
)
output = torch.add(output, torch.mul(quant_bias, deq_scale).to(self.layer.params_dtype))
else:
output_parallel = self.layer.quant_method.apply(self.layer,
x,
bias=bias_)
output_parallel = self.layer.quant_method.apply(self.layer, x, bias=bias_)
output = tensor_model_parallel_reduce_scatter(output_parallel, 0)

return output
@@ -651,13 +599,10 @@ class SequenceRowParallelOp(CustomRowParallelOp):
|
||||
|
||||
|
||||
class ShardedCPRowParallelOp(CustomRowParallelOp):
|
||||
|
||||
@property
|
||||
def comm_group(self):
|
||||
# fake comm group to bypass tp logic
|
||||
return SimpleNamespace(world_size=1,
|
||||
rank_in_group=0,
|
||||
device_group=None)
|
||||
return SimpleNamespace(world_size=1, rank_in_group=0, device_group=None)

def apply_impl(
self,
@@ -677,13 +622,10 @@ class ShardedCPRowParallelOp(CustomRowParallelOp):


class ShardedCPColumnParallelOp(CustomColumnParallelOp):

@property
def comm_group(self):
# fake comm group to bypass tp logic
return SimpleNamespace(world_size=1,
rank_in_group=0,
device_group=None)
return SimpleNamespace(world_size=1, rank_in_group=0, device_group=None)

def apply_impl(
self,
@@ -700,12 +642,10 @@ class ShardedCPColumnParallelOp(CustomColumnParallelOp):

def _get_column_parallel_op(
prefix, layer
) -> Optional[Union[MLPColumnParallelOp, SequenceColumnParallelOp,
ShardedCPColumnParallelOp, Flashcomm2OshardQKVParallelOp]]:
) -> MLPColumnParallelOp | SequenceColumnParallelOp | ShardedCPColumnParallelOp | Flashcomm2OshardQKVParallelOp | None:
if enable_dsa_cp() and ("q_b_proj" in prefix or "kv_b_proj" in prefix):
return ShardedCPColumnParallelOp(layer)
if "gate_up_proj" in prefix and mlp_tp_enable(
) and not is_moe_layer(prefix):
if "gate_up_proj" in prefix and mlp_tp_enable() and not is_moe_layer(prefix):
return MLPColumnParallelOp(layer)
if flashcomm2_oshard_manager.flashcomm2_oshard_enable():
if any(p in prefix for p in ("qkv_proj", "conv1d", "query_key_value")):
@@ -714,7 +654,7 @@ def _get_column_parallel_op(
if "shared_expert" in prefix:
return None
sp_column_prefix = [
"gate_up_proj",  # first MLP of most LLMs
"gate_up_proj",  # first MLP of most LLMs
"in_proj",  # gated deltanet of Qwen3 Next
"qkv_proj",  # qkv linear of most LLMs
"conv1d",  # gated deltanet of Qwen3 Next
@@ -729,9 +669,15 @@ def _get_column_parallel_op(

def _get_row_parallel_op(
prefix, layer
) -> Optional[Union[MLPRowParallelOp, OProjRowParallelOp,
Flashcomm2OProjRowParallelOp, MatmulAllreduceRowParallelOp,
SequenceRowParallelOp, ShardedCPRowParallelOp]]:
) -> (
MLPRowParallelOp
| OProjRowParallelOp
| Flashcomm2OProjRowParallelOp
| MatmulAllreduceRowParallelOp
| SequenceRowParallelOp
| ShardedCPRowParallelOp
| None
):
if enable_dsa_cp_with_layer_shard() and "o_proj" in prefix:
return ShardedCPRowParallelOp(layer)
if "down_proj" in prefix and mlp_tp_enable() and not is_moe_layer(prefix):
@@ -760,16 +706,21 @@ def _get_row_parallel_op(


def get_parallel_op(disable_tp, prefix, layer, direct):
if disable_tp or ("shared_experts" in prefix
and shared_expert_dp_enabled()):
if disable_tp or ("shared_experts" in prefix and shared_expert_dp_enabled()):
return None, 0, 1
custom_op: Optional[Union[MLPColumnParallelOp, SequenceColumnParallelOp,
MLPRowParallelOp, OProjRowParallelOp,
Flashcomm2OProjRowParallelOp,
Flashcomm2OshardQKVParallelOp,
MatmulAllreduceRowParallelOp,
SequenceRowParallelOp, ShardedCPRowParallelOp,
ShardedCPColumnParallelOp]] = None
custom_op: (
MLPColumnParallelOp
| SequenceColumnParallelOp
| MLPRowParallelOp
| OProjRowParallelOp
| Flashcomm2OProjRowParallelOp
| Flashcomm2OshardQKVParallelOp
| MatmulAllreduceRowParallelOp
| SequenceRowParallelOp
| ShardedCPRowParallelOp
| ShardedCPColumnParallelOp
| None
) = None
if direct == "row":
custom_op = _get_row_parallel_op(prefix, layer)

@@ -782,8 +733,7 @@ def get_parallel_op(disable_tp, prefix, layer, direct):
return None, get_tp_group().rank_in_group, get_tp_group().world_size
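
`get_parallel_op` hands back a `(custom_op, tp_rank, tp_size)` triple: a matching custom op when the layer prefix qualifies, otherwise `None` plus the regular TP group coordinates. A hedged usage sketch (the caller, prefix, and attachment point are illustrative only, not part of this diff):

```python
# `layer` is assumed to be an existing row-parallel linear module.
custom_op, tp_rank, tp_size = get_parallel_op(
    disable_tp=False,
    prefix="model.layers.0.self_attn.o_proj",
    layer=layer,
    direct="row",
)
if custom_op is not None:
    layer.custom_op = custom_op  # assumed attachment point, not from this patch
```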


def get_replicated_op(disable_tp, prefix,
layer) -> Optional[Union[CustomReplicatedOp]]:
def get_replicated_op(disable_tp, prefix, layer) -> CustomReplicatedOp | None:
if disable_tp:
return None

@@ -791,24 +741,22 @@ def get_replicated_op(disable_tp, prefix,


def is_moe_layer(prefix: str) -> bool:

@lru_cache(maxsize=1)
def get_moe_params():
from vllm.config import get_current_vllm_config

vllm_config = get_current_vllm_config()
config = vllm_config.model_config.hf_text_config
n_routed_experts = getattr(config, 'n_routed_experts', 0)
first_k_dense_replace = getattr(config, 'first_k_dense_replace',
float('inf'))
moe_layer_freq = getattr(config, 'moe_layer_freq', 1)
n_routed_experts = getattr(config, "n_routed_experts", 0)
first_k_dense_replace = getattr(config, "first_k_dense_replace", float("inf"))
moe_layer_freq = getattr(config, "moe_layer_freq", 1)
return n_routed_experts, first_k_dense_replace, moe_layer_freq

match = re.search(r'layers\.(\d+)\.', prefix)
match = re.search(r"layers\.(\d+)\.", prefix)
if match is None:
return False
layer_idx = int(match.group(1))

n_routed_experts, first_k_dense_replace, moe_layer_freq = get_moe_params()

return (n_routed_experts is not None and layer_idx >= first_k_dense_replace
and layer_idx % moe_layer_freq == 0)
return n_routed_experts is not None and layer_idx >= first_k_dense_replace and layer_idx % moe_layer_freq == 0
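
In effect, `is_moe_layer` pulls the layer index out of the weight prefix and checks it against the MoE schedule in the HF config. With illustrative DeepSeek-style values (`first_k_dense_replace=3`, `moe_layer_freq=1`; the real thresholds come from the loaded model config):

```python
# Illustrative only; assumes the config values named above.
assert not is_moe_layer("model.layers.2.mlp.gate_up_proj")     # dense warm-up layer
assert is_moe_layer("model.layers.3.mlp.experts.0.gate_proj")  # routed-expert layer
assert not is_moe_layer("lm_head")                             # no "layers.<idx>." in prefix
```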

@@ -17,13 +17,15 @@

import math
import os
from typing import Optional, Tuple

import torch
import torch_npu
from vllm.model_executor.layers.rotary_embedding import (
DeepseekScalingRotaryEmbedding, MRotaryEmbedding, RotaryEmbedding,
YaRNScalingRotaryEmbedding)
DeepseekScalingRotaryEmbedding,
MRotaryEmbedding,
RotaryEmbedding,
YaRNScalingRotaryEmbedding,
)
from vllm.model_executor.layers.rotary_embedding.common import ApplyRotaryEmb
from vllm.triton_utils import HAS_TRITON

@@ -31,8 +33,7 @@ if HAS_TRITON:
from vllm.model_executor.layers.rotary_embedding.mrope import triton_mrope

from vllm_ascend.platform import NPUPlatform
from vllm_ascend.utils import (AscendDeviceType, enable_custom_op,
get_ascend_device_type, has_rope, is_vl_model)
from vllm_ascend.utils import AscendDeviceType, enable_custom_op, get_ascend_device_type, has_rope, is_vl_model

# Currently, rope ops used on npu require detached cos and sin as inputs.
# However, RotaryEmbedding in vllm uses cos_sin_cache as a whole variable.
@@ -54,17 +55,13 @@ _cos_slice: torch.Tensor = None
_sin_slice: torch.Tensor = None


def set_cos_and_sin(vllm_config, max_num_reqs, decode_token_per_req, dtype,
device):
def set_cos_and_sin(vllm_config, max_num_reqs, decode_token_per_req, dtype, device):
global _cos_mla
global _sin_mla
global _cos
global _sin

if _cos_mla is not None or \
_sin_mla is not None or \
_cos is not None or \
_sin is not None:
if _cos_mla is not None or _sin_mla is not None or _cos is not None or _sin is not None:
return

model_config = vllm_config.model_config
@@ -72,36 +69,15 @@ def set_cos_and_sin(vllm_config, max_num_reqs, decode_token_per_req, dtype,

if model_config.use_mla:
rope_dim = model_config.hf_text_config.qk_rope_head_dim
_cos_mla = torch.ones(max_num_batched_tokens,
1,
1,
rope_dim,
dtype=dtype,
device=device)
_sin_mla = torch.zeros(max_num_batched_tokens,
1,
1,
rope_dim,
dtype=dtype,
device=device)
_cos_mla = torch.ones(max_num_batched_tokens, 1, 1, rope_dim, dtype=dtype, device=device)
_sin_mla = torch.zeros(max_num_batched_tokens, 1, 1, rope_dim, dtype=dtype, device=device)
elif not is_vl_model(vllm_config) and has_rope(vllm_config):
rope_dim = model_config.get_head_size()
# For models using partial rope like Qwen3-Next.
if hasattr(model_config.hf_text_config, "partial_rotary_factor"):
rope_dim = int(rope_dim *
model_config.hf_text_config.partial_rotary_factor)
_cos = torch.ones(1,
max_num_batched_tokens,
1,
rope_dim,
dtype=dtype,
device=device)
_sin = torch.zeros(1,
max_num_batched_tokens,
1,
rope_dim,
dtype=dtype,
device=device)
rope_dim = int(rope_dim * model_config.hf_text_config.partial_rotary_factor)
_cos = torch.ones(1, max_num_batched_tokens, 1, rope_dim, dtype=dtype, device=device)
_sin = torch.zeros(1, max_num_batched_tokens, 1, rope_dim, dtype=dtype, device=device)


def get_cos_and_sin_mla(positions, use_cache=False):
@@ -139,8 +115,7 @@ def _record_cos_and_sin_cache_interleaved(cos_sin_cache):
if _cos_cache is not None or _sin_cache is not None:
return
hidden_dim = cos_sin_cache.shape[-1] // 2
cos_cache, sin_cache = cos_sin_cache.view(-1, 2, hidden_dim).repeat(
1, 1, 2).chunk(2, dim=1)
cos_cache, sin_cache = cos_sin_cache.view(-1, 2, hidden_dim).repeat(1, 1, 2).chunk(2, dim=1)
_cos_cache = cos_cache.squeeze(1)
_sin_cache = sin_cache.squeeze(1)

@@ -151,16 +126,16 @@ def update_cos_sin(positions):
global _cos_slice
global _sin_slice

if _cos_sin_cache is None or \
_cos is None or \
_sin is None:
if _cos_sin_cache is None or _cos is None or _sin is None:
return

num_tokens = positions.size(0)
_cos[:, :num_tokens] = _cos_sin_cache.index_select(0, positions).view(
num_tokens, 2, -1).repeat(1, 1, 2).chunk(2, dim=-2)[0]
_sin[:, :num_tokens] = _cos_sin_cache.index_select(0, positions).view(
num_tokens, 2, -1).repeat(1, 1, 2).chunk(2, dim=-2)[1]
_cos[:, :num_tokens] = (
_cos_sin_cache.index_select(0, positions).view(num_tokens, 2, -1).repeat(1, 1, 2).chunk(2, dim=-2)[0]
)
_sin[:, :num_tokens] = (
_cos_sin_cache.index_select(0, positions).view(num_tokens, 2, -1).repeat(1, 1, 2).chunk(2, dim=-2)[1]
)
_cos_slice = _cos[:, :num_tokens]
_sin_slice = _sin[:, :num_tokens]
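
The `view(..., 2, -1).repeat(1, 1, 2).chunk(2, dim=-2)` chain above splits a cache whose last dimension stores the cos half next to the sin half, and tiles each half back to the full rotary width. A standalone sketch with toy sizes (purely illustrative):

```python
import torch

# Toy cache: 4 positions, rotary_dim = 6, laid out as [cos(3) | sin(3)].
cache = torch.arange(4 * 6, dtype=torch.float32).view(4, 6)
num_tokens, half = cache.shape[0], cache.shape[1] // 2

cos, sin = cache.view(num_tokens, 2, -1).repeat(1, 1, 2).chunk(2, dim=-2)
# cos: cache[:, :3] repeated twice along the last dim -> shape [4, 1, 6]
# sin: cache[:, 3:] repeated twice along the last dim -> shape [4, 1, 6]
assert torch.equal(cos.squeeze(1), cache[:, :half].repeat(1, 2))
assert torch.equal(sin.squeeze(1), cache[:, half:].repeat(1, 2))
```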

@@ -170,8 +145,7 @@ def get_cos_and_sin_slice():


def _custom_rotary_embedding_enabled(query, neox_style, head_size):
return query.dtype == torch.float16 and neox_style and head_size % 32 == 0 and enable_custom_op(
)
return query.dtype == torch.float16 and neox_style and head_size % 32 == 0 and enable_custom_op()


def _rope_forward_oot(
@@ -180,8 +154,8 @@ def _rope_forward_oot(
query: torch.Tensor,
key: torch.Tensor,
is_neox_style: bool,
offsets: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
offsets: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor]:
query_shape, key_shape = query.shape, key.shape
if self.cos_sin_cache.device != query.device:
self.cos_sin_cache = self.cos_sin_cache.to(query.device)
@@ -189,8 +163,7 @@ def _rope_forward_oot(
self.cos_sin_cache = self.cos_sin_cache.to(query.dtype)
cos, sin = get_cos_and_sin_slice()
# adopt custom kernel path for rotary_embedding
if _custom_rotary_embedding_enabled(
query, is_neox_style, self.head_size):
if _custom_rotary_embedding_enabled(query, is_neox_style, self.head_size):
query, key = torch.ops._C_ascend.rotary_embedding(
positions,
query,
@@ -201,43 +174,40 @@ def _rope_forward_oot(
)
return query.view(query_shape), key.view(key_shape)
if offsets is not None:
raise NotImplementedError(
"Batched rotary embedding is currently not supported on NPU.")
raise NotImplementedError("Batched rotary embedding is currently not supported on NPU.")
else:
if is_neox_style and self.head_size == 128 and self.cos_sin_cache.shape[
-1] == 128 and cos is not None and sin is not None:
if (
is_neox_style
and self.head_size == 128
and self.cos_sin_cache.shape[-1] == 128
and cos is not None
and sin is not None
):
# If cos and sin are generated outside, use npu_apply_rotary_pos_emb to avoid redundant calculation.
# This method requires head_size and rotary_dim to equal 128 and neox_style to be True.
query = query.contiguous().view(1, query.shape[0], -1,
self.head_size)
query = query.contiguous().view(1, query.shape[0], -1, self.head_size)
key = key.contiguous().view(1, key.shape[0], -1, self.head_size)
# Although this function modifies in-place, please retain the function's return value.
# Otherwise, the graph fusion operation may fail.
query, key = torch_npu.npu_apply_rotary_pos_emb(
query, key, cos, sin)
query, key = torch_npu.npu_apply_rotary_pos_emb(query, key, cos, sin)
elif self.rotary_dim < self.head_size:
if HAS_TRITON:

if HAS_TRITON:
cos = cos.view(-1, self.rotary_dim)
sin = sin.view(-1, self.rotary_dim)
q = query.contiguous().view(query.shape[0], -1,
self.head_size)
q = query.contiguous().view(query.shape[0], -1, self.head_size)
k = key.contiguous().view(key.shape[0], -1, self.head_size)
query, key = torch.ops.vllm.rope_forward_triton(q,
k,
cos,
sin,
rope_dim=self.rotary_dim,
is_neox_style=True)
query, key = torch.ops.vllm.rope_forward_triton(
q, k, cos, sin, rope_dim=self.rotary_dim, is_neox_style=True
)
return query.view(query_shape), key.view(key_shape)
else:
num_tokens = query.shape[0]
query = query.view(num_tokens, -1, self.head_size)
key = key.view(num_tokens, -1, self.head_size)
q_rot = query[..., :self.rotary_dim]
q_pass = query[..., self.rotary_dim:]
k_rot = key[..., :self.rotary_dim]
k_pass = key[..., self.rotary_dim:]
q_rot = query[..., : self.rotary_dim]
q_pass = query[..., self.rotary_dim :]
k_rot = key[..., : self.rotary_dim]
k_pass = key[..., self.rotary_dim :]
q_rot = q_rot.contiguous().view(num_tokens, -1)
k_rot = k_rot.contiguous().view(num_tokens, -1)
# only the rotary part is processed here,
@@ -271,7 +241,6 @@ def _rope_forward_oot(


class AscendRotaryEmbedding(RotaryEmbedding):

def __init__(
self,
head_size: int,
@@ -281,8 +250,7 @@ class AscendRotaryEmbedding(RotaryEmbedding):
is_neox_style: bool,
dtype: torch.dtype,
) -> None:
super().__init__(head_size, rotary_dim, max_position_embeddings, base,
is_neox_style, dtype)
super().__init__(head_size, rotary_dim, max_position_embeddings, base, is_neox_style, dtype)
_record_cos_sin_cache(self.cos_sin_cache)
_record_cos_and_sin_cache_interleaved(self.cos_sin_cache)

@@ -291,18 +259,16 @@ class AscendRotaryEmbedding(RotaryEmbedding):
positions: torch.Tensor,
query: torch.Tensor,
key: torch.Tensor,
offsets: Optional[torch.Tensor] = None,
is_neox_style_override: Optional[bool] = None,
offsets: torch.Tensor | None = None,
is_neox_style_override: bool | None = None,
):
is_neox_style = self.is_neox_style
if is_neox_style_override is not None:
is_neox_style = is_neox_style_override
return _rope_forward_oot(self, positions, query, key, is_neox_style,
offsets)
return _rope_forward_oot(self, positions, query, key, is_neox_style, offsets)


class AscendYaRNRotaryEmbedding(YaRNScalingRotaryEmbedding):

def __init__(
self,
head_size: int,
@@ -322,10 +288,11 @@ class AscendYaRNRotaryEmbedding(YaRNScalingRotaryEmbedding):
"extrapolation_factor": extrapolation_factor,
"attn_factor": attn_factor,
"beta_fast": beta_fast,
"beta_slow": beta_slow
"beta_slow": beta_slow,
}
super().__init__(head_size, rotary_dim, max_position_embeddings, base,
is_neox_style, scaling_factor, dtype, **extra_kwargs)
super().__init__(
head_size, rotary_dim, max_position_embeddings, base, is_neox_style, scaling_factor, dtype, **extra_kwargs
)
_record_cos_sin_cache(self.cos_sin_cache)

def forward_oot(
@@ -333,16 +300,13 @@ class AscendYaRNRotaryEmbedding(YaRNScalingRotaryEmbedding):
positions: torch.Tensor,
query: torch.Tensor,
key: torch.Tensor,
offsets: Optional[torch.Tensor] = None,
is_neox_style_override: Optional[bool] = None,
offsets: torch.Tensor | None = None,
is_neox_style_override: bool | None = None,
):
return AscendRotaryEmbedding.forward_oot(self, positions, query, key,
offsets,
is_neox_style_override)
return AscendRotaryEmbedding.forward_oot(self, positions, query, key, offsets, is_neox_style_override)


class AscendDeepseekScalingRotaryEmbedding(DeepseekScalingRotaryEmbedding):

def __init__(
self,
head_size: int,
@@ -370,18 +334,17 @@ class AscendDeepseekScalingRotaryEmbedding(DeepseekScalingRotaryEmbedding):
self.beta_slow = beta_slow
# Get n-d magnitude scaling corrected for interpolation.
self.mscale = float(
self._yarn_get_mscale(self.scaling_factor, float(mscale)) /
self._yarn_get_mscale(self.scaling_factor, float(mscale_all_dim)) *
attn_factor)
super(DeepseekScalingRotaryEmbedding,
self).__init__(head_size, rotary_dim, max_position_embeddings,
base, is_neox_style, dtype)
self._yarn_get_mscale(self.scaling_factor, float(mscale))
/ self._yarn_get_mscale(self.scaling_factor, float(mscale_all_dim))
* attn_factor
)
super(DeepseekScalingRotaryEmbedding, self).__init__(
head_size, rotary_dim, max_position_embeddings, base, is_neox_style, dtype
)

# NOTE: For ascend friendly computing, reorder sin and cos cache
self.max_seq_len = math.ceil(max_position_embeddings * scaling_factor)
self._set_cos_sin_cache(self.max_seq_len,
device=NPUPlatform.device_type,
dtype=dtype)
self._set_cos_sin_cache(self.max_seq_len, device=NPUPlatform.device_type, dtype=dtype)

def _yarn_get_mscale(self, scale: float = 1, mscale: float = 1) -> float:
if scale <= 1:
@@ -390,56 +353,35 @@ class AscendDeepseekScalingRotaryEmbedding(DeepseekScalingRotaryEmbedding):

def _rotate_half(self, x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., :x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2:]
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
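
With `_rotate_half`, neox-style RoPE reduces to two elementwise products, `x_embed = x * cos + rotate_half(x) * sin`. A tiny worked example of the rotation itself:

```python
import torch

x = torch.tensor([1.0, 2.0, 3.0, 4.0])
# rotate_half maps [x1 | x2] to [-x2 | x1] over the halves of the last dim:
rotated = torch.cat((-x[2:], x[:2]), dim=-1)
assert rotated.tolist() == [-3.0, -4.0, 1.0, 2.0]
# The full embedding is then x * cos + rotated * sin, applied per position.
```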

def _yarn_linear_ramp_mask(self, min_value, max_value, dim):
# Note: The if conditional branch is not used here
# to solve an MTP compilation error.
max_value += (min_value == max_value).float() * 0.001
linear_func = (torch.arange(dim, dtype=torch.float32) -
min_value) / (max_value - min_value)
linear_func = (torch.arange(dim, dtype=torch.float32) - min_value) / (max_value - min_value)
ramp_func = torch.clamp(linear_func, 0, 1)
return ramp_func

# Inverse dim formula to find dim based on number of rotations
def _yarn_find_correction_dim(self,
num_rotations,
dim,
base=10000,
max_position_embeddings=2048):
def _yarn_find_correction_dim(self, num_rotations, dim, base=10000, max_position_embeddings=2048):
# Note: use torch instead of math to solve an MTP compilation error.
return (dim * torch.log(
torch.tensor(max_position_embeddings) /
(num_rotations * 2 * torch.pi))) / (2 *
torch.log(torch.tensor(base)))
return (dim * torch.log(torch.tensor(max_position_embeddings) / (num_rotations * 2 * torch.pi))) / (
2 * torch.log(torch.tensor(base))
)

# Find dim range bounds based on rotations
def _yarn_find_correction_range(self,
low_rot,
high_rot,
dim,
base=10000,
max_position_embeddings=2048):
def _yarn_find_correction_range(self, low_rot, high_rot, dim, base=10000, max_position_embeddings=2048):
# Note: use torch instead of math to solve an MTP compilation error.
low = torch.floor(
self._yarn_find_correction_dim(low_rot, dim, base,
max_position_embeddings))
high = torch.ceil(
self._yarn_find_correction_dim(high_rot, dim, base,
max_position_embeddings))
low = torch.floor(self._yarn_find_correction_dim(low_rot, dim, base, max_position_embeddings))
high = torch.ceil(self._yarn_find_correction_dim(high_rot, dim, base, max_position_embeddings))
# Note: use torch instead of max/min to solve an MTP compilation error.
return torch.clamp(low, min=0), torch.clamp(high, max=dim - 1)
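
The correction dim inverts the RoPE wavelength relation: it solves for the dimension whose frequency completes a given number of rotations over the training context. In the notation of the code above, with $|D|$ = `dim`, $r$ = `num_rotations`, and $L$ = `max_position_embeddings`:

$$
d(r) = \frac{|D| \, \ln\!\big(L / (2\pi r)\big)}{2 \ln(\text{base})}
$$

The correction range is then $\big[\max(\lfloor d(r_{\text{low}}) \rfloor, 0),\ \min(\lceil d(r_{\text{high}}) \rceil, |D|-1)\big]$, matching the two `torch.clamp` calls.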

# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def _apply_rotary_pos_emb(self,
q,
k,
cos,
sin,
position_ids,
unsqueeze_dim=1):
def _apply_rotary_pos_emb(self, q, k, cos, sin, position_ids, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
@@ -451,11 +393,11 @@ class AscendDeepseekScalingRotaryEmbedding(DeepseekScalingRotaryEmbedding):
used to pass offsetted position ids when working with a KV-cache.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example,
note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim].
Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1
makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly,
if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
@@ -488,10 +430,10 @@ class AscendDeepseekScalingRotaryEmbedding(DeepseekScalingRotaryEmbedding):
def _set_cos_sin_cache(self, max_seq_len, device, dtype):
dim = self.rotary_dim

freq_extra = 1.0 / (self.base**(
torch.arange(0, dim, 2, dtype=torch.float32, device=device) / dim))
freq_inter = 1.0 / (self.scaling_factor * self.base**(
torch.arange(0, dim, 2, dtype=torch.float32, device=device) / dim))
freq_extra = 1.0 / (self.base ** (torch.arange(0, dim, 2, dtype=torch.float32, device=device) / dim))
freq_inter = 1.0 / (
self.scaling_factor * self.base ** (torch.arange(0, dim, 2, dtype=torch.float32, device=device) / dim)
)

low, high = self._yarn_find_correction_range(
self.beta_fast,
@@ -500,10 +442,8 @@ class AscendDeepseekScalingRotaryEmbedding(DeepseekScalingRotaryEmbedding):
self.base,
self.max_position_embeddings,
)
inv_freq_mask = 1.0 - self._yarn_linear_ramp_mask(
low, high, dim // 2).to(device=device, dtype=torch.float32)
inv_freq = freq_inter * (1 -
inv_freq_mask) + freq_extra * inv_freq_mask
inv_freq_mask = 1.0 - self._yarn_linear_ramp_mask(low, high, dim // 2).to(device=device, dtype=torch.float32)
inv_freq = freq_inter * (1 - inv_freq_mask) + freq_extra * inv_freq_mask
self.register_buffer("inv_freq", inv_freq, persistent=False)

t = torch.arange(max_seq_len, device=device, dtype=torch.float32)
@@ -513,20 +453,16 @@ class AscendDeepseekScalingRotaryEmbedding(DeepseekScalingRotaryEmbedding):
sin_cached = torch.cat([freqs, freqs], dim=-1).sin() * self.mscale
cos_cached = cos_cached.to(dtype)
sin_cached = sin_cached.to(dtype)
cache = torch.cat(
[freqs.cos() * self.mscale,
freqs.sin() * self.mscale], dim=-1).to(dtype)
cache = torch.cat([freqs.cos() * self.mscale, freqs.sin() * self.mscale], dim=-1).to(dtype)
self.register_buffer("cos_sin_cache", cache, persistent=False)
self.register_buffer("cos_cached", cos_cached, persistent=False)
self.register_buffer("sin_cached", sin_cached, persistent=False)
_record_cos_sin_cache(cache)
_record_cos_and_sin_cache(cos_cached, sin_cached)
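
Written out, the cache construction blends interpolated and extrapolated frequencies. With $\theta_i = \text{base}^{-2i/|D|}$, $s$ the scaling factor, and $m_i$ = `inv_freq_mask`:

$$
\text{inv\_freq}_i = (1 - m_i)\,\frac{\theta_i}{s} + m_i\,\theta_i
$$

Dimensions below the correction range ($m_i = 1$) keep their original frequencies, dimensions above it ($m_i = 0$) are fully interpolated by $1/s$, and the linear ramp blends in between; `mscale` then rescales the cached cos/sin magnitudes.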

def forward(self,
positions: torch.Tensor,
query: torch.Tensor,
key: torch.Tensor,
offsets: Optional[torch.Tensor] = None):
def forward(
self, positions: torch.Tensor, query: torch.Tensor, key: torch.Tensor, offsets: torch.Tensor | None = None
):
if len(key.shape) == 2:
key = key[:, None, :]
# Note: we implement the non neox_style method by shuffling the last dim and applying the neox style
@@ -535,26 +471,24 @@ class AscendDeepseekScalingRotaryEmbedding(DeepseekScalingRotaryEmbedding):
is_neox_style = True
if self.is_neox_style is False:
b, h_q, d = query.shape
query = query.view(b, h_q, d // 2,
2).transpose(3, 2).reshape(b, h_q, d)
query = query.view(b, h_q, d // 2, 2).transpose(3, 2).reshape(b, h_q, d)
b, h_k, d = key.shape
key = key.view(b, h_k, d // 2, 2).transpose(3,
2).reshape(b, h_k, d)
q_pe, k_pe = _rope_forward_oot(self, positions, query, key,
is_neox_style, offsets)
key = key.view(b, h_k, d // 2, 2).transpose(3, 2).reshape(b, h_k, d)
q_pe, k_pe = _rope_forward_oot(self, positions, query, key, is_neox_style, offsets)
return q_pe, k_pe


class AscendMRotaryEmbedding(MRotaryEmbedding):

# Empirical safety threshold for large Triton grids on Ascend NPU
_ASCEND_TRITON_GRID_LIMIT = 65535

def forward_triton(self,
positions: torch.Tensor,
query: torch.Tensor,
key: torch.Tensor | None = None,
offsets: torch.Tensor | None = None):
def forward_triton(
self,
positions: torch.Tensor,
query: torch.Tensor,
key: torch.Tensor | None = None,
offsets: torch.Tensor | None = None,
):
assert positions.ndim == 2
assert key is not None

@@ -571,10 +505,9 @@ class AscendMRotaryEmbedding(MRotaryEmbedding):

assert self.mrope_section

# When the grid becomes large, enable TRITON_ALL_BLOCKS_PARALLEL
# When the grid becomes large, enable TRITON_ALL_BLOCKS_PARALLEL
# to avoid scheduler/runtime failures.
if (query_shape[0] > self._ASCEND_TRITON_GRID_LIMIT and
os.environ.get("TRITON_ALL_BLOCKS_PARALLEL") != "1"):
if query_shape[0] > self._ASCEND_TRITON_GRID_LIMIT and os.environ.get("TRITON_ALL_BLOCKS_PARALLEL") != "1":
os.environ["TRITON_ALL_BLOCKS_PARALLEL"] = "1"

q, k = triton_mrope(
@@ -600,35 +533,37 @@ class AscendMRotaryEmbedding(MRotaryEmbedding):
# todo: need cann update in 8.5.0
return self.forward_triton(positions, query, key)

if self.mrope_section != [16, 24, 24] or \
get_ascend_device_type() == AscendDeviceType.A5:
if self.mrope_section != [16, 24, 24] or get_ascend_device_type() == AscendDeviceType.A5:
return super().forward_oot(positions, query, key)

import torch_npu
mrope_section = [0, 0, 0
] if positions.ndim == 1 else self.mrope_section

mrope_section = [0, 0, 0] if positions.ndim == 1 else self.mrope_section

if self.cos_sin_cache.device != query.device:  # type: ignore
self.cos_sin_cache = self.cos_sin_cache.to(  # type: ignore
query.device)  # type: ignore
query.device
)  # type: ignore

if self.cos_sin_cache.dtype != query.dtype:  # type: ignore
self.cos_sin_cache = self.cos_sin_cache.to(  # type: ignore
query.dtype)  # type: ignore
query.dtype
)  # type: ignore

query, key = torch_npu.npu_mrope(positions.contiguous(),
query.contiguous(),
key.contiguous(),
self.cos_sin_cache.contiguous(),
self.head_size,
mrope_section=mrope_section,
rotary_mode='half')
query, key = torch_npu.npu_mrope(
positions.contiguous(),
query.contiguous(),
key.contiguous(),
self.cos_sin_cache.contiguous(),
self.head_size,
mrope_section=mrope_section,
rotary_mode="half",
)

return query, key
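
For 2-D `positions` of shape `[3, num_tokens]`, `mrope_section = [16, 24, 24]` splits the 64 rotary half-dims among the temporal, height, and width position rows; `[0, 0, 0]` with 1-D positions degenerates to ordinary RoPE. A sketch of the section-to-dim mapping (illustrative only, not the `npu_mrope` kernel):

```python
import torch

mrope_section = [16, 24, 24]  # t/h/w half-dims, summing to 64
positions = torch.zeros(3, 10, dtype=torch.long)  # [t, h, w] index per token

# For every rotary half-dim, pick which of the three position rows drives it.
section_ids = torch.repeat_interleave(
    torch.arange(3), torch.tensor(mrope_section))  # shape [64]
per_dim_positions = positions[section_ids, :]  # shape [64, 10]
# cos/sin for half-dim d of token i are then looked up with
# per_dim_positions[d, i] instead of a single scalar position.
```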


class AscendApplyRotaryEmb(ApplyRotaryEmb):

def __init__(
self,
enforce_enable: bool = False,
@@ -647,8 +582,7 @@ class AscendApplyRotaryEmb(ApplyRotaryEmb):
cos: torch.Tensor,
sin: torch.Tensor,
) -> torch.Tensor:
x, cos, sin, origin_shape, origin_dtype = self._pre_process(
x, cos, sin)
x, cos, sin, origin_shape, origin_dtype = self._pre_process(x, cos, sin)

head_dim = x.shape[-1]
# cos, sin: [seq_len, head_dim // 2]