#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

import torch
from torch import nn
from vllm.config import get_current_vllm_config
from vllm.model_executor.layers.layernorm import GemmaRMSNorm, RMSNorm, RMSNormGated

from vllm_ascend.ops.triton.layernorm_gated import layer_norm_fwd_npu
from vllm_ascend.utils import enable_custom_op, get_weight_prefetch_method


class AscendRMSNorm(RMSNorm):
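    """RMSNorm that runs on Ascend NPU through fused ``torch_npu`` kernels.

    In addition to the usual RMSNorm weight, an optional additive norm bias is
    created when the quantization config advertises ``norm.bias`` tensors
    (e.g. anti_method m4), so the fused kernel can apply it as well.
    """
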
    def __init__(
        self,
        hidden_size: int,
        eps: float = 1e-6,
        var_hidden_size: int | None = None,
        has_weight: bool = True,
        dtype: torch.dtype | None = None,
    ) -> None:
        super().__init__(hidden_size, eps, var_hidden_size, has_weight, dtype)
        vllm_config = get_current_vllm_config()
        self.bias = None
        self.bias_loaded = False

        # Quantization with anti_method m4 generates a non-zero norm bias.
        if vllm_config.quant_config is not None and any(
            "norm.bias" in name for name in vllm_config.quant_config.quant_description
        ):
            self.bias = torch.nn.Parameter(torch.zeros(hidden_size), requires_grad=False)
            self.bias.weight_loader = self._bias_weight_loader

    def _bias_weight_loader(self, param: torch.nn.Parameter, loaded_weight: torch.Tensor) -> None:
        if param.numel() == 1 and loaded_weight.numel() == 1:
            # Sometimes scalar values aren't considered tensors with shapes,
            # so if both param and loaded_weight are scalars,
            # "broadcast" instead of copying.
            param.data.fill_(loaded_weight.item())
        else:
            assert param.size() == loaded_weight.size(), (
                f"Attempted to load weight ({loaded_weight.size()}) into parameter ({param.size()})"
            )
            param.data.copy_(loaded_weight)
        self.bias_loaded = True

    def forward_oot(
        self,
        x: torch.Tensor,
        residual: torch.Tensor | None = None,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        import torch_npu

        if residual is not None:
            residual = torch.ops.vllm.maybe_chunk_residual(x, residual)
            if enable_custom_op():
                x, _, residual = torch.ops._C_ascend.npu_add_rms_norm_bias(
                    x, residual, self.weight, self.bias, self.variance_epsilon
                )
            else:
                x, _, residual = torch_npu.npu_add_rms_norm(x, residual, self.weight, self.variance_epsilon)
                if self.bias is not None:
                    x.add_(self.bias)
            return x, residual

        x, residual = torch_npu.npu_rms_norm(x, self.weight, self.variance_epsilon)
        if self.bias_loaded:
            x.add_(self.bias)

        weight_prefetch_method = get_weight_prefetch_method()
        weight_prefetch_method.maybe_prefetch_mlp_weight_postprocess(x)
        return x
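

# The helper below is an illustrative, eager-mode sketch of what the fused
# residual-add + RMSNorm path above computes. It is not used at runtime, and
# the name is ours rather than part of the vllm-ascend API; the real torch_npu
# kernels may differ in intermediate dtypes and rounding.
def _add_rms_norm_eager_sketch(
    x: torch.Tensor,
    residual: torch.Tensor,
    weight: torch.Tensor,
    eps: float,
    bias: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor]:
    # Residual add, RMS normalization in fp32, then scale (and optional bias).
    residual = residual + x
    variance = residual.float().pow(2).mean(dim=-1, keepdim=True)
    y = (residual.float() * torch.rsqrt(variance + eps)).to(x.dtype) * weight
    if bias is not None:
        y = y + bias
    return y, residual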


class AscendGemmaRMSNorm(GemmaRMSNorm):
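    """GemmaRMSNorm on Ascend NPU.

    Gemma stores its scale so that the effective multiplier is
    ``1.0 + weight``, which is why the fused add + RMSNorm kernels below are
    invoked with ``1.0 + self.weight``.
    """
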
    def forward_oot(
        self,
        x: torch.Tensor,
        residual: torch.Tensor | None = None,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        import torch_npu

        if residual is not None:
            residual = torch.ops.vllm.maybe_chunk_residual(x, residual)
            if enable_custom_op():
                x, _, residual = torch.ops._C_ascend.npu_add_rms_norm_bias(
                    x, residual, 1.0 + self.weight, None, self.variance_epsilon
                )
            else:
                x, _, residual = torch_npu.npu_add_rms_norm(x, residual, 1.0 + self.weight, self.variance_epsilon)
            return x, residual

        x, _ = torch.ops._C_ascend.npu_gemma_rms_norm(x, self.weight, self.variance_epsilon)
        return x


class LayerNormFn(torch.autograd.Function):
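    """Autograd wrapper around the NPU Triton (group) layer-norm kernel.

    The forward pass calls ``layer_norm_fwd_npu`` and supports an optional
    SiLU gate ``z`` applied before or after normalization; the tensors needed
    for a backward pass are saved on ``ctx``.
    """
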
    @staticmethod
    def forward(ctx, x, weight, bias, z=None, eps=1e-6, group_size=None, norm_before_gate=True, is_rms_norm=False):
        """If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z))."""

        x_shape_og = x.shape
        # Reshape the input into a 2D tensor.
        x = x.reshape(-1, x.shape[-1])
        if x.stride(-1) != 1:
            x = x.contiguous()
        if z is not None:
            assert z.shape == x_shape_og
            z = z.reshape(-1, z.shape[-1])
            if z.stride(-1) != 1:
                z = z.contiguous()
        weight = weight.contiguous()
        if bias is not None:
            bias = bias.contiguous()
        y, mean, rstd = layer_norm_fwd_npu(
            x,
            weight,
            bias,
            eps,
            z=z,
            group_size=group_size,
            norm_before_gate=norm_before_gate,
            is_rms_norm=is_rms_norm,
        )
        ctx.save_for_backward(x, weight, bias, mean, rstd, z)
        ctx.x_shape_og = x_shape_og
        ctx.eps = eps
        ctx.group_size = group_size
        ctx.norm_before_gate = norm_before_gate
        ctx.is_rms_norm = is_rms_norm
        return y.reshape(x_shape_og)
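

# Illustrative-only eager sketch (single group, RMSNorm) of the gating
# semantics handled by layer_norm_fwd_npu above; the helper name is ours and
# it is not called anywhere in this module.
def _gated_rms_norm_eager_sketch(
    x: torch.Tensor,
    weight: torch.Tensor,
    z: torch.Tensor | None = None,
    eps: float = 1e-6,
    norm_before_gate: bool = True,
) -> torch.Tensor:
    def _rms(t: torch.Tensor) -> torch.Tensor:
        # RMS-normalize along the last dimension (computed in fp32).
        return (t.float() * torch.rsqrt(t.float().pow(2).mean(-1, keepdim=True) + eps)).to(t.dtype)

    if z is None:
        return _rms(x) * weight
    if norm_before_gate:
        return _rms(x) * weight * torch.nn.functional.silu(z)
    return _rms(x * torch.nn.functional.silu(z)) * weight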


class AscendRMSNormGated(RMSNormGated):
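    """RMSNorm with an optional SiLU gate, backed by the NPU Triton kernel.

    A rough usage sketch (shapes are assumptions for illustration)::

        norm = AscendRMSNormGated(hidden_size=1024, eps=1e-5)
        y = norm.forward_oot(x, z=gate)  # x, gate: [num_tokens, 1024]
    """
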
    def __init__(
        self,
        hidden_size,
        eps: float = 1e-5,
        group_size: int | None = None,
        norm_before_gate: bool = False,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ):
        """If group_size is not None, we do GroupNorm with each group having group_size elements.
        group_size=None is equivalent to group_size=hidden_size (i.e. there's only 1 group).
        """
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__(hidden_size, eps, group_size, norm_before_gate, device, dtype)
        self.eps = eps
        self.weight = nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
        self.register_parameter("bias", None)
        self.group_size = group_size
        self.norm_before_gate = norm_before_gate
        self.reset_parameters()

    def reset_parameters(self):
        torch.nn.init.ones_(self.weight)

    def forward_oot(self, x, z=None):
        """If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z))."""
        return LayerNormFn.apply(x, self.weight, self.bias, z, self.eps, self.group_size, self.norm_before_gate, True)