### What this PR does / why we need it?
- Replace the RoPE operator implementation.
- Refactor leftover 300I DUO implementations remaining in the main branch.
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
- vLLM version: v0.14.1
- vLLM main: dc917cceb8
---------
Signed-off-by: Tflowers-0129 <2906339855@qq.com>
```python
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

import torch
import torch.nn.functional as F

from vllm_ascend.ops.activation import AscendSiluAndMul


class AscendSiluAndMul310(AscendSiluAndMul):
    """SiluAndMul variant for 310-series (300I DUO) devices."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Kick off the optional MLP down-projection weight prefetch
        # (vLLM custom op) so it can overlap with the activation below.
        torch.ops.vllm.maybe_prefetch_mlp_down_proj(x)
        # Split the last dimension in half: SiLU(first half) gates the second.
        h = x.shape[-1] // 2
        out = F.silu(x[..., :h]) * x[..., h:]
        # Wait for the prefetch (if any) to complete before returning.
        torch.ops.vllm.maybe_wait_prefetch_done(out)
        return out
```
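For context, `SiluAndMul` is the gated activation used in vLLM MLP blocks: the input's last dimension holds the gate and up projections concatenated, and the output is `silu(gate) * up`, halving that dimension. A minimal plain-PyTorch sketch of those semantics (omitting the `torch.ops.vllm.*` prefetch hooks, which are vLLM custom ops and outside this illustration):

```python
import torch
import torch.nn.functional as F

x = torch.randn(2, 8)                  # last dim = 2 * hidden size
h = x.shape[-1] // 2
out = F.silu(x[..., :h]) * x[..., h:]  # SiLU(gate) * up
assert out.shape == (2, 4)             # the last dimension is halved
```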