[MISC] Clean up torch_npu (#688)

torch_npu 2.5.1 supports autoload now. This patch does:
1. remove useless torch_npu import
2. replace `torch_npu.npu` with `torch.npu`.

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
wangxiyuan
2025-04-29 18:03:38 +08:00
committed by GitHub
parent 0329fad927
commit b917361ca5
15 changed files with 18 additions and 47 deletions

View File

@@ -16,7 +16,6 @@
#
import torch
import torch_npu # noqa: F401
import vllm_ascend.ops.activation # noqa
import vllm_ascend.ops.common_fused_moe # noqa
@@ -34,7 +33,7 @@ class dummyFusionOp:
def register_dummy_fusion_op() -> None:
torch.cuda.CUDAGraph = torch_npu.npu.NPUGraph
torch.cuda.CUDAGraph = torch.npu.NPUGraph
torch.ops._C.rms_norm = dummyFusionOp(name="rms_norm")
torch.ops._C.fused_add_rms_norm = dummyFusionOp(name="fused_add_rms_norm")
torch.ops._C.static_scaled_fp8_quant = dummyFusionOp(