[7/N][refactor] fix torchair rope ops (#2683)

### What this PR does / why we need it?
Due to the op registration mechanism, the torchair rope ops cannot take
effect on their own, so we have to patch the Ascend rope ops to adapt them
to torchair.
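
As a toy illustration of why class-level patching works here (none of this
code is from the PR): method lookup goes through the class, so rebinding a
method on the class object swaps the behavior for all instances, and
rebinding `__init__` affects every instance constructed afterwards.

```python
# Toy sketch, not the PR's code: rebinding a method on the class object
# changes what every instance resolves at call time.
class RotaryEmbedding:
    def forward(self, x):
        return "native rope"

def torchair_forward(self, x):
    return "torchair rope"

RotaryEmbedding.forward = torchair_forward  # class-level monkey patch

layer = RotaryEmbedding()
assert layer.forward(None) == "torchair rope"
```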

### Does this PR introduce _any_ user-facing change?
No

### How was this patch tested?
- vLLM version: main
- vLLM main: 7ea22e42d5



Signed-off-by: hust17yixuan <303660421@qq.com>

@@ -185,15 +185,14 @@ def torchair_quant_method_register():
def torchair_ops_patch():
    from vllm.model_executor.layers.rotary_embedding import (
        DeepseekScalingRotaryEmbedding, RotaryEmbedding)

    from vllm_ascend.ops.rotary_embedding import (
        AscendDeepseekScalingRotaryEmbedding, AscendRotaryEmbedding)
    from vllm_ascend.torchair.ops.torchair_rotary_embedding import (
        deepseek_rope_init_func, native_rope_deepseek_forward,
        qwen_rope_init_func, rope_forward)

    # Rebind the rope methods at class level so both the upstream vLLM
    # classes and the Ascend variants pick up the torchair implementations.
    RotaryEmbedding.__init__ = qwen_rope_init_func
    RotaryEmbedding.forward_oot = rope_forward
    AscendRotaryEmbedding.__init__ = qwen_rope_init_func  # type: ignore[method-assign]
    AscendRotaryEmbedding.forward_oot = rope_forward  # type: ignore[method-assign]

    DeepseekScalingRotaryEmbedding.__init__ = deepseek_rope_init_func
    DeepseekScalingRotaryEmbedding.forward = native_rope_deepseek_forward
    AscendDeepseekScalingRotaryEmbedding.__init__ = deepseek_rope_init_func  # type: ignore[method-assign]
    AscendDeepseekScalingRotaryEmbedding.forward = native_rope_deepseek_forward  # type: ignore[method-assign]
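
For context, a minimal sketch of how such a patch function is typically
wired in; the import path and hook name below are assumptions for
illustration, not taken from this PR:

```python
# Hypothetical call site (module path and function name are assumed, not
# from this PR).
from vllm_ascend.torchair.utils import torchair_ops_patch  # assumed path

def register_torchair_ops():
    # Run the patch before any model layers are built, so the replaced
    # __init__ functions take effect when RotaryEmbedding is constructed.
    torchair_ops_patch()
```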