Compare commits
10 Commits
0ae0bd7904...24742d2267
| Author | SHA1 | Date |
|---|---|---|
| | 24742d2267 | |
| | a07ba004ef | |
| | 796520a5f7 | |
| | 12a1443b38 | |
| | aead975ce4 | |
| | 434159b16b | |
| | cea32765a4 | |
| | 5e2769ecd6 | |
| | d96db0c9a7 | |
| | 495c3fcd8a | |
```diff
@@ -9,4 +9,4 @@ RUN pip install open_clip_torch==2.24.0 sortedcontainers modelscope av==11.0.0
 RUN sed -i 's|source /torch/venv3/pytorch/bin/activate|source /torch/venv3/pytorch_infer/bin/activate|' /root/.bashrc
 
 COPY . /workspace/
-RUN pip install whls/cambricon_pytorch_lightning-2.5.0+mlu0.7.0-py3-none-any.whl
+RUN pip install whls.mlu/cambricon_pytorch_lightning-2.5.0+mlu0.7.0-py3-none-any.whl
```
```diff
@@ -1,7 +0,0 @@
-FROM pytorch/pytorch:2.6.0-cuda12.4-cudnn9-devel
-
-WORKDIR /workspace
-
-RUN pip install opencv-python-headless imageio[ffmpeg] einops datasets==3.2.0 simplejson diffusers==0.34.0 open_clip_torch==2.24.0 sortedcontainers modelscope av==11.0.0 addict -i https://nexus.4pd.io/repository/pypi-all/simple
-
-COPY . /workspace/
```
```diff
@@ -1,9 +0,0 @@
-FROM git.modelhub.org.cn:9443/enginex-ascend/vllm-ascend:v0.10.0rc1
-
-WORKDIR /workspace
-
-RUN pip install diffusers==0.34.0
-RUN pip install imageio[ffmpeg] einops datasets==3.2.0 simplejson addict open_clip_torch==2.24.0 sortedcontainers modelscope==1.28.2 av==11.0.0 pytorch-lightning
-COPY . /workspace/
-RUN patch /usr/local/python3.11.13/lib/python3.11/site-packages/modelscope/models/multi_modal/video_synthesis/text_to_video_synthesis_model.py patch.ascend/text_to_video_synthesis_model.py.patch
-RUN patch /usr/local/python3.11.13/lib/python3.11/site-packages/modelscope/utils/device.py patch.ascend/device.py.patch
```
```diff
@@ -1,8 +0,0 @@
-FROM git.modelhub.org.cn:9443/enginex-kunlunxin/text2image/r200_8f-diffuser:v0.21.4
-
-WORKDIR /workspace
-
-RUN source /root/miniconda/etc/profile.d/conda.sh && conda activate python38_torch201_cuda && pip install imageio[ffmpeg] einops datasets==3.1.0 simplejson addict sortedcontainers modelscope==1.28.2 av==11.0.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
-RUN source /root/miniconda/etc/profile.d/conda.sh && conda activate python38_torch201_cuda && pip install megatron megatron-core urllib3==1.26.20 -i https://pypi.tuna.tsinghua.edu.cn/simple
-RUN source /root/miniconda/etc/profile.d/conda.sh && conda activate python38_torch201_cuda && pip install open_clip_torch==2.24.0 pytorch-lightning==2.0.1 -i https://pypi.tuna.tsinghua.edu.cn/simple
-COPY . /workspace/
```
README.md (new file, 23 lines)
```diff
@@ -0,0 +1,23 @@
+## Quickstart
+
+### Build the image
+```bash
+docker build -t text2video:v0.1 .
+```
+
+### Download the model
+Model: https://modelscope.cn/models/iic/text-to-video-synthesis
+Place it under `/mnt/contest_ceph/zhanghao/models/iic/text-to-video-synthesis` (if you change this directory, also update the model path in the run script below).
+
+### Run the test
+1. Prepare the input dataset; see `dataset.json` for an example.
+2. Run the test program inside the docker image; based on the contents of `dataset.json`, it generates video files under the `output` directory.
+```bash
+./run_in_docker.sh
+```
+
+## Test results
+| | A100 avg generation time (s) | MLU-x4 avg generation time (s) | MLU-x8 avg generation time (s) |
+|------|-------------------------|----------------------------|---------------------------|
+| Time | 12 | 37 | 45 |
+
```
```diff
@@ -1,4 +1,4 @@
 [
-    "An image of a squirrel in Picasso style",
-    "A cozy cabin in the woods, watercolor painting"
-]
+    "A panda is eating burgers and french fries",
+    "A sheep is walking and eating in the grass"
+]
```
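`dataset.json` stays a bare JSON list of prompt strings; `load_prompts` in main.py (its body is not part of this diff) turns it into the `cfg` dicts that `generate_one` consumes. A hypothetical sketch of that step, the dict shape being an assumption:

```python
# Hypothetical sketch; main.py's actual load_prompts is not shown in this diff.
import json
from pathlib import Path

def load_prompts(json_path: Path):
    data = json.loads(json_path.read_text(encoding="utf-8"))
    # dataset.json is a bare list of prompt strings; wrap each entry in a dict
    # so downstream code can attach per-prompt options later.
    return [{"text": p} if isinstance(p, str) else p for p in data]
```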
iic.py (deleted file, 16 lines)
```diff
@@ -1,16 +0,0 @@
-import os
-import torch
-device = "cuda" if torch.cuda.is_available() else "npu"
-import patch
-
-from modelscope.pipelines import pipeline
-from modelscope.outputs import OutputKeys
-
-model_path = "/mnt/contest_ceph/zhanghao/models/iic/text-to-video-synthesis"
-p = pipeline('text-to-video-synthesis', model_path, device=device)
-
-test_text = {
-    'text': 'A panda eating a burger and french fries on a rock.',
-}
-output_video_path = p(test_text, device=device, output_video='./output.mp4')[OutputKeys.OUTPUT_VIDEO]
-print('output_video_path:', output_video_path)
```
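The deleted iic.py was a single-prompt smoke test; its `pipeline('text-to-video-synthesis', …)` call survives as the default branch of `build_pipeline` in the main.py changes below.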
main.py (28 lines changed)
```diff
@@ -13,6 +13,8 @@ import patch
 from modelscope.pipelines import pipeline
 from modelscope.outputs import OutputKeys
 import torch
+from diffusers import DiffusionPipeline
+from diffusers.utils import export_to_video
 
 
 def safe_stem(text: str, maxlen: int = 60) -> str:
```
```diff
@@ -47,12 +49,19 @@ def load_prompts(json_path: Path):
     return prompts
 
 
-def build_pipeline(model_path: str, device: str = "cuda", dtype=torch.float16):
-    pipe = pipeline('text-to-video-synthesis', model_path, device=device)
+def build_pipeline(model_path: str, device: str = "cuda", dtype=torch.float16, model_type: str = "text-to-video-synthesis"):
+    if model_type == "text-to-video-synthesis":
+        pipe = pipeline('text-to-video-synthesis', model_path, device=device)
+    elif model_type == "text-to-video-ms":
+        pipe = DiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype)
+        pipe.enable_model_cpu_offload()  # save GPU memory
+        pipe.enable_vae_slicing()
+    else:
+        raise ValueError(f"Unsupported model type: {model_type}")
     return pipe
 
 
-def generate_one(pipe, cfg: dict, out_dir: Path, index: int):
+def generate_one(pipe, cfg: dict, out_dir: Path, index: int, model_type: str = "text-to-video-synthesis"):
     """
     Generate one result from cfg and return (save path, elapsed seconds, detail dict).
     Supported fields:
```
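The new `text-to-video-ms` branch runs entirely on Diffusers. A minimal standalone sketch of that path; the checkpoint id here is illustrative, not from this repo, and any Diffusers-format text-to-video checkpoint directory works in its place:

```python
# Standalone sketch of the "text-to-video-ms" branch added above.
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video

pipe = DiffusionPipeline.from_pretrained(
    "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16  # assumed checkpoint
)
pipe.enable_model_cpu_offload()  # keep submodules on CPU until they are used
pipe.enable_vae_slicing()        # decode the VAE in slices to lower peak memory

frames = pipe("A panda is eating burgers and french fries", num_frames=16).frames[0]
export_to_video(frames, "panda.mp4")
```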
```diff
@@ -65,7 +74,13 @@ def generate_one(pipe, cfg: dict, out_dir: Path, index: int):
     out_path = out_dir / filename
 
     start = time.time()
-    output_video_path = pipe({"text": prompt}, output_video=str(out_path))[OutputKeys.OUTPUT_VIDEO]
+    if model_type == "text-to-video-synthesis":
+        output_video_path = pipe({"text": prompt}, output_video=str(out_path))[OutputKeys.OUTPUT_VIDEO]
+    elif model_type == "text-to-video-ms":
+        frames = pipe(prompt, num_frames=16).frames[0]
+        export_to_video(frames, str(out_path))
+    else:
+        raise ValueError(f"Unsupported model type: {model_type}")
     elapsed = time.time() - start
 
     detail = {
```
```diff
@@ -87,6 +102,7 @@ def main():
     parser.add_argument("--outdir", required=True, help="output directory")
     parser.add_argument("--device", default="cuda", help="inference device")
     parser.add_argument("--dtype", default="fp16", choices=["fp16", "fp32"], help="inference precision")
+    parser.add_argument("--model_type", default="text-to-video-synthesis", choices=["text-to-video-synthesis", "text-to-video-ms"], help="model type")
     args, _ = parser.parse_known_args()
 
     model_path = args.model
```
```diff
@@ -103,12 +119,12 @@ def main():
     if not prompts:
         raise ValueError("Test list is empty.")
 
-    pipe = build_pipeline(model_path=model_path, device=args.device, dtype=dtype)
+    pipe = build_pipeline(model_path=model_path, device=args.device, dtype=dtype, model_type=args.model_type)
 
     records = []
     total_start = time.time()
     for i, cfg in enumerate(prompts, 1):
-        out_path, elapsed, detail = generate_one(pipe, cfg, out_dir, i)
+        out_path, elapsed, detail = generate_one(pipe, cfg, out_dir, i, model_type=args.model_type)
         print(f"[{i}/{len(prompts)}] saved: {out_path.name} elapsed: {elapsed:.3f}s")
         records.append(detail)
     total_elapsed = round(time.time() - total_start, 6)
```
```diff
@@ -1,2 +0,0 @@
-27d26
-< assert eles[0] in ['cpu', 'cuda', 'gpu'], err_msg
```
```diff
@@ -1,10 +0,0 @@
-60a61
-> print(f"kwargs: {kwargs}")
-62a64
-> print(f"device: {self.device}")
-129c131
-< layer='penultimate')
----
-> layer='penultimate', device=self.device)
-224a227
-> print(f"self.device: {self.device}")
```
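These two deleted files appear to be the `patch.ascend/*.patch` files referenced by the deleted Ascend Dockerfile, written in diff(1)'s "normal" format: `27d26` deletes line 27 of the original, `60a61` appends after line 60, `129c131` replaces a line, with `<`/`>` marking old/new text. The Dockerfile applied them to the installed modelscope package with `patch`; a sketch of the same step from Python, paths copied from that Dockerfile, assuming patch(1) is on PATH:

```python
# Apply the Ascend patches the way the deleted Dockerfile's RUN patch steps did.
import subprocess

SITE = "/usr/local/python3.11.13/lib/python3.11/site-packages/modelscope"
subprocess.run(["patch", f"{SITE}/utils/device.py",
                "patch.ascend/device.py.patch"], check=True)
subprocess.run(["patch",
                f"{SITE}/models/multi_modal/video_synthesis/text_to_video_synthesis_model.py",
                "patch.ascend/text_to_video_synthesis_model.py.patch"], check=True)
```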
patch.py (2 lines changed)
```diff
@@ -22,7 +22,7 @@ try:
     if torch.npu.is_available() and not torch.cuda.is_available():
         enable_cuda_to_npu_shim()
 except:
-    print("exception")
+    print("no npu. use native cuda")
 
 # 1) Optional: if your weights come from a lightning ckpt, allowlist its classes (only from trusted sources)
 try:
```
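`enable_cuda_to_npu_shim` itself is defined elsewhere in patch.py and is not part of this diff; a purely hypothetical sketch of the idea, answering CUDA probes from Ascend's `torch_npu` backend so device-selection code written for CUDA picks the NPU instead:

```python
# Hypothetical sketch; the real enable_cuda_to_npu_shim in patch.py may differ.
import torch
import torch_npu  # Ascend PyTorch plugin (assumption: installed in the image)

def enable_cuda_to_npu_shim():
    # Make torch.cuda availability probes report the NPU's status, so code
    # that gates on CUDA still runs on the Ascend device.
    torch.cuda.is_available = torch.npu.is_available
```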
run_in_docker.sh (new executable file, 5 lines)
```diff
@@ -0,0 +1,5 @@
+#! /usr/bin/env bash
+# cnmon
+image=text2video:v0.1
+device_id=0
+docker run -v `pwd`:/workspace -v /mnt:/mnt --device=/dev/cambricon_dev$device_id:/dev/cambricon_dev0 --device=/dev/cambricon_ctl:/dev/cambricon_ctl $image ./test.sh
```
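Note the device mapping in the `docker run` line: the host's `/dev/cambricon_dev$device_id` is always presented to the container as `/dev/cambricon_dev0`, so code inside the image can target MLU device 0 regardless of which physical card is selected. The deleted per-vendor scripts below use the same trick for their respective device nodes.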
```diff
@@ -1,3 +0,0 @@
-#! /usr/bin/env bash
-image=harbor-contest.4pd.io/zhanghao/t2v:a100-0.1
-docker run -it -v /home/zhanghao/workspace:/workspace -v /mnt:/mnt $image bash
```
```diff
@@ -1,4 +0,0 @@
-#! /usr/bin/env bash
-image=harbor-contest.4pd.io/zhanghao/t2v:ascend-0.1
-device=0
-docker run -it -v `pwd`:/host -e ASCEND_VISIBLE_DEVICES=$device --device /dev/davinci$device:/dev/davinci0 --device /dev/davinci_manager --device /dev/devmm_svm --device /dev/hisi_hdc -v /mnt:/mnt -v /usr/local/dcmi:/usr/local/dcmi -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi -v /usr/local/Ascend/driver/lib64/:/usr/local/Ascend/driver/lib64/ -v /usr/local/Ascend/driver/version.info:/usr/local/Ascend/driver/version.info -v /etc/ascend_install.info:/etc/ascend_install.info --privileged --entrypoint bash $image
```
```diff
@@ -1,6 +0,0 @@
-#! /usr/bin/env bash
-# cnmon
-image=harbor-contest.4pd.io/zhanghao/iic:mlu370
-image=harbor-contest.4pd.io/zhanghao/t2v:mlu370-0.1
-device_id=2
-docker run -it -v /root/zhanghao:/workspace -v /mnt:/mnt --device=/dev/cambricon_dev$device_id:/dev/cambricon_dev0 --device=/dev/cambricon_ctl:/dev/cambricon_ctl $image bash
```
```diff
@@ -1,5 +0,0 @@
-#! /usr/bin/env bash
-# cnmon
-image=harbor-contest.4pd.io/zhanghao/t2v:r200-0.1
-device_id=2
-docker run -it -v /root/zhanghao:/workspace -v /mnt:/mnt --security-opt=seccomp=unconfined --cap-add=SYS_PTRACE --cap-add=SYS_ADMIN --device /dev/fuse --shm-size=32g --ulimit=memlock=-1 --ulimit=nofile=120000 --ulimit=stack=67108864 --device=/dev/xpu$device_id:/dev/xpu0 --device=/dev/xpuctrl:/dev/xpuctrl $image bash
```