[310P]: refactor 310P KV cache and some op classes (#6117)
### What this PR does / why we need it?
* Refactor the LayerNorm and activation operator classes to decouple the
310P device implementation from the main branch (the override pattern is
sketched after this list).
* Refactor `mm_encoder_attention` on 310P to use the
`torch_npu._npu_flash_attention_unpad` operator.
* Refactor the QKV inputs in the prefill stage of `attention_v1` on 310P
so they are no longer padded to a multiple of 16 (illustrated in the
second sketch after this list).
* Refactor `model_runner` on 310P to align the KV-cache initialization
logic with the mainline implementation.
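
To make the decoupling concrete, here is a minimal sketch of the override pattern, assuming the 310P classes subclass the corresponding mainline Ascend ops and only replace the forward path. The base-class import path and the method body below are illustrative, not the actual `vllm_ascend._310p` implementation.

```python
# Illustrative sketch of the 310P op-decoupling pattern; not the real code.
# AscendRMSNorm is assumed to be the mainline Ascend op class.
import torch

from vllm_ascend.ops.layernorm import AscendRMSNorm  # assumed mainline class


class AscendRMSNorm310(AscendRMSNorm):
    """310P-specific RMSNorm that avoids kernels unsupported on 310P."""

    def forward_oot(self, x: torch.Tensor, residual: torch.Tensor | None = None):
        if residual is not None:
            x = x + residual
            residual = x
        # Plain-PyTorch RMSNorm, used here as a stand-in for the 310P path.
        variance = x.pow(2).mean(dim=-1, keepdim=True)
        x = x * torch.rsqrt(variance + self.variance_epsilon)
        x = x * self.weight
        return x if residual is None else (x, residual)
```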
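
And a rough illustration, with made-up shapes, of what dropping the 16-alignment padding in the prefill QKV path means, assuming the padding was applied along the token dimension; this is not the actual `attention_v1` code.

```python
# Hypothetical illustration of padding QKV tensors to a multiple of 16;
# shapes are made up for this example.
import torch
import torch.nn.functional as F

num_tokens, num_heads, head_dim = 37, 8, 128
q = torch.randn(num_tokens, num_heads, head_dim)

# Previously (illustrative): pad the token dimension up to a multiple of 16
# before calling the attention kernel, then slice the padding back off.
aligned = (num_tokens + 15) // 16 * 16              # 48
q_padded = F.pad(q, (0, 0, 0, 0, 0, aligned - num_tokens))
assert q_padded.shape == (48, 8, 128)

# After this refactor: q is passed with its real length (37 tokens),
# with no alignment padding applied.
```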
### Does this PR introduce _any_ user-facing change?
NO
### How was this patch tested?
Verified with the e2e tests.
- vLLM version: v0.13.0
- vLLM main: d68209402d
---------
Signed-off-by: Tflowers-0129 <2906339855@qq.com>
```diff
@@ -721,16 +721,17 @@ def register_ascend_customop(vllm_config: VllmConfig | None = None):
    # 310P: override selected ops with 310P implementations (keep minimal changes outside _310p)
    if is_310p():
        from vllm_ascend._310p.ops.activation import AscendSiluAndMul310
        from vllm_ascend._310p.ops.layernorm import AscendGemmaRMSNorm310, AscendRMSNorm310
        from vllm_ascend._310p.ops.mm_encoder_attention import AscendMMEncoderAttention310
        from vllm_ascend._310p.ops.rotary_embedding import (
            AscendMRotaryEmbedding310,
        )
        from vllm_ascend._310p.ops.rotary_embedding import AscendMRotaryEmbedding310

        REGISTERED_ASCEND_OPS.update(
            {
                "SiluAndMul": AscendSiluAndMul310,
                "MMEncoderAttention": AscendMMEncoderAttention310,
                "MRotaryEmbedding": AscendMRotaryEmbedding310,
                "RMSNorm": AscendRMSNorm310,
                "GemmaRMSNorm": AscendGemmaRMSNorm310,
            }
        )
```
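
The hunk above keeps the override surface small: the mainline op registry is built first, and only when `is_310p()` is true are the affected entries replaced with the `_310p` classes, so behaviour on other Ascend devices is unchanged.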