[MISC] Clean up torch_npu (#688)
torch_npu 2.5.1 supports autoload now. This patch does: 1. remove the useless explicit torch_npu import 2. replace `torch_npu.npu` with `torch.npu`. Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
@@ -16,7 +16,6 @@
|
||||
#
|
||||
|
||||
import torch
|
||||
import torch_npu # noqa: F401
|
||||
|
||||
import vllm_ascend.ops.activation # noqa
|
||||
import vllm_ascend.ops.common_fused_moe # noqa
|
||||
@@ -34,7 +33,7 @@ class dummyFusionOp:
|
||||
|
||||
|
||||
def register_dummy_fusion_op() -> None:
|
||||
torch.cuda.CUDAGraph = torch_npu.npu.NPUGraph
|
||||
torch.cuda.CUDAGraph = torch.npu.NPUGraph
|
||||
torch.ops._C.rms_norm = dummyFusionOp(name="rms_norm")
|
||||
torch.ops._C.fused_add_rms_norm = dummyFusionOp(name="fused_add_rms_norm")
|
||||
torch.ops._C.static_scaled_fp8_quant = dummyFusionOp(
|
||||
|
||||
Reference in New Issue
Block a user