diff --git a/Dockerfile.ascend b/Dockerfile.ascend new file mode 100644 index 0000000..fac49b2 --- /dev/null +++ b/Dockerfile.ascend @@ -0,0 +1,9 @@ +FROM git.modelhub.org.cn:9443/enginex-ascend/vllm-ascend:v0.10.0rc1 + +WORKDIR /workspace + +RUN pip install diffusers==0.34.0 +RUN pip install imageio[ffmpeg] einops datasets==3.2.0 simplejson addict open_clip_torch==2.24.0 sortedcontainers modelscope==1.28.2 av==11.0.0 pytorch-lightning +COPY . /workspace/ +RUN patch /usr/local/python3.11.13/lib/python3.11/site-packages/modelscope/models/multi_modal/video_synthesis/text_to_video_synthesis_model.py patch.ascend/text_to_video_synthesis_model.py.patch +RUN patch /usr/local/python3.11.13/lib/python3.11/site-packages/modelscope/utils/device.py patch.ascend/device.py.patch diff --git a/iic.py b/iic.py index 905fff9..0885768 100644 --- a/iic.py +++ b/iic.py @@ -1,26 +1,16 @@ import os import torch -from functools import wraps - -_orig_load = torch.load - -@wraps(_orig_load) -def _load_patch(*args, **kwargs): - kwargs.setdefault("weights_only", False) - return _orig_load(*args, **kwargs) - -torch.load = _load_patch - +import patch from modelscope.pipelines import pipeline from modelscope.outputs import OutputKeys +device = "cuda" if torch.cuda.is_available() else "npu" model_path = "/mnt/contest_ceph/zhanghao/models/iic/text-to-video-synthesis" -p = pipeline('text-to-video-synthesis', model_path) +p = pipeline('text-to-video-synthesis', model_path, device=device) test_text = { - 'text': 'A panda eating bamboo on a rock.', + 'text': 'A panda eating a burger and french fries on a rock.', } -output_video_path = p(test_text, output_video='./output.mp4')[OutputKeys.OUTPUT_VIDEO] +output_video_path = p(test_text, device=device, output_video='./output.mp4')[OutputKeys.OUTPUT_VIDEO] print('output_video_path:', output_video_path) - diff --git a/main.py b/main.py index 64075e5..ad053b0 100644 --- a/main.py +++ b/main.py @@ -8,21 +8,11 @@ import re import time from datetime import 
datetime from pathlib import Path - -import torch -from functools import wraps - -_orig_load = torch.load - -@wraps(_orig_load) -def _load_patch(*args, **kwargs): - kwargs.setdefault("weights_only", False) - return _orig_load(*args, **kwargs) - -torch.load = _load_patch +import patch from modelscope.pipelines import pipeline from modelscope.outputs import OutputKeys +import torch def safe_stem(text: str, maxlen: int = 60) -> str: @@ -58,7 +48,7 @@ def load_prompts(json_path: Path): def build_pipeline(model_path: str, device: str = "cuda", dtype=torch.float16): - pipe = pipeline('text-to-video-synthesis', model_path) + pipe = pipeline('text-to-video-synthesis', model_path, device=device) return pipe @@ -95,7 +85,7 @@ def main(): parser.add_argument("--json", required=True, help="测试文本 JSON 文件路径") parser.add_argument("--results", required=True, help="结果 JSON 文件输出路径(*.json)") parser.add_argument("--outdir", required=True, help="图片输出目录") - parser.add_argument("--device", default="cuda", choices=["cuda", "cpu"], help="推理设备") + parser.add_argument("--device", default="cuda", help="推理设备") parser.add_argument("--dtype", default="fp16", choices=["fp16", "fp32"], help="推理精度") args, _ = parser.parse_known_args() diff --git a/patch.ascend/device.py.patch b/patch.ascend/device.py.patch new file mode 100644 index 0000000..58c4d32 --- /dev/null +++ b/patch.ascend/device.py.patch @@ -0,0 +1,2 @@ +27d26 +< assert eles[0] in ['cpu', 'cuda', 'gpu'], err_msg diff --git a/patch.ascend/text_to_video_synthesis_model.py.patch b/patch.ascend/text_to_video_synthesis_model.py.patch new file mode 100644 index 0000000..693b916 --- /dev/null +++ b/patch.ascend/text_to_video_synthesis_model.py.patch @@ -0,0 +1,10 @@ +60a61 +> print(f"kwargs: {kwargs}") +62a64 +> print(f"device: {self.device}") +129c131 +< layer='penultimate') +--- +> layer='penultimate', device=self.device) +224a227 +> print(f"self.device: {self.device}") diff --git a/patch.py b/patch.py new file mode 100644 index 
0000000..04786e2 --- /dev/null +++ b/patch.py @@ -0,0 +1,43 @@ +import torch +from functools import wraps + +def enable_cuda_to_npu_shim(): + print("enable_cuda_to_npu_shim") + import torch_npu # 注册 npu + # 仅映射常见函数;不要贪多 + torch.cuda.is_available = torch.npu.is_available + torch.cuda.device_count = torch.npu.device_count + torch.cuda.current_device= torch.npu.current_device + torch.cuda.set_device = torch.npu.set_device + torch.cuda.synchronize = torch.npu.synchronize + try: + # 若存在空缓存接口 + torch.cuda.empty_cache = torch.npu.empty_cache # 某些版本可用 + except Exception: + pass + # 设备字符串统一用 npu + # 业务里仍建议 model.to("npu:0") 显式写清 +try: + import torch_npu + if torch.npu.is_available() and not torch.cuda.is_available(): + enable_cuda_to_npu_shim() +except Exception: + print("exception") + +# 1) 可选:如果你的权重来自 lightning 的 ckpt,放行其类(仅在可信来源时) +try: + from torch.serialization import add_safe_globals + from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint + add_safe_globals([ModelCheckpoint]) +except Exception: + pass + +# 2) 统一把 torch.load 默认映射到 CPU,避免 CUDA 反序列化错误 +_orig_load = torch.load +def _load_map_to_cpu(*args, **kwargs): + kwargs.setdefault("map_location", "cpu") + kwargs.setdefault("weights_only", False) + return _orig_load(*args, **kwargs) +torch.load = _load_map_to_cpu + + diff --git a/run_in_docker_ascend.sh b/run_in_docker_ascend.sh new file mode 100755 index 0000000..18c89df --- /dev/null +++ b/run_in_docker_ascend.sh @@ -0,0 +1,4 @@ +#! 
/usr/bin/env bash +image=harbor-contest.4pd.io/zhanghao/t2v:ascend-0.1 +device=0 +docker run -it -v `pwd`:/host -e ASCEND_VISIBLE_DEVICES=$device --device /dev/davinci$device:/dev/davinci0 --device /dev/davinci_manager --device /dev/devmm_svm --device /dev/hisi_hdc -v /mnt:/mnt -v /usr/local/dcmi:/usr/local/dcmi -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi -v /usr/local/Ascend/driver/lib64/:/usr/local/Ascend/driver/lib64/ -v /usr/local/Ascend/driver/version.info:/usr/local/Ascend/driver/version.info -v /etc/ascend_install.info:/etc/ascend_install.info --privileged --entrypoint bash $image