support iic/text-to-video-synthesis
This commit is contained in:
4
.gitattributes
vendored
Normal file
4
.gitattributes
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
*.whl filter=lfs diff=lfs merge=lfs -text
|
||||
*.a filter=lfs diff=lfs merge=lfs -text
|
||||
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
||||
*.zip filter=lfs diff=lfs merge=lfs -text
|
||||
11
Dockerfile
Normal file
11
Dockerfile
Normal file
@@ -0,0 +1,11 @@
|
||||
# Cambricon MLU370 PyTorch base image (internal registry build:
# torch 2.5.0 + torch_mlu 1.24.1, Ubuntu 22.04, Python 3.10).
FROM git.modelhub.org.cn:9443/enginex-cambricon/mlu370-pytorch:v25.01-torch2.5.0-torchmlu1.24.1-ubuntu22.04-py310

WORKDIR /workspace

# Put the inference venv, ffmpeg-mlu and Neuware/OpenMPI tools ahead of the
# system paths so `python`/`pip` resolve to /torch/venv3/pytorch_infer.
ENV PATH=/torch/venv3/pytorch_infer/bin:/workspace/ffmpeg-mlu-v4.2.0/install/bin:/usr/local/neuware/bin:/usr/local/openmpi/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin

RUN pip install diffusers==0.34.0
RUN pip install open_clip_torch==2.24.0 sortedcontainers modelscope av==11.0.0
# Interactive shells in the base image activate the training venv; repoint
# .bashrc at the inference venv used by this workload.
RUN sed -i 's|source /torch/venv3/pytorch/bin/activate|source /torch/venv3/pytorch_infer/bin/activate|' /root/.bashrc

COPY . /workspace/
# Cambricon-patched pytorch-lightning, shipped as a local wheel (Git LFS).
RUN pip install whls/cambricon_pytorch_lightning-2.5.0+mlu0.7.0-py3-none-any.whl
|
||||
4
dataset.json
Normal file
4
dataset.json
Normal file
@@ -0,0 +1,4 @@
|
||||
[
|
||||
"An image of a squirrel in Picasso style",
|
||||
"A cozy cabin in the woods, watercolor painting"
|
||||
]
|
||||
14
iic.py
Normal file
14
iic.py
Normal file
@@ -0,0 +1,14 @@
|
||||
# Smoke test: run the ModelScope text-to-video-synthesis pipeline once with a
# fixed prompt and print the path of the generated video.
import os  # NOTE(review): unused in this script — candidate for removal.

from modelscope.pipelines import pipeline
from modelscope.outputs import OutputKeys

# Local checkpoint of iic/text-to-video-synthesis on the shared storage mount.
model_path = "/mnt/contest_ceph/zhanghao/models/iic/text-to-video-synthesis"
p = pipeline('text-to-video-synthesis', model_path)

# ModelScope text-to-video pipelines take the prompt under the 'text' key.
test_text = {
    'text': 'A panda eating bamboo on a rock.',
}
# The pipeline writes ./output.mp4 and returns its path under
# OutputKeys.OUTPUT_VIDEO.
output_video_path = p(test_text, output_video='./output.mp4')[OutputKeys.OUTPUT_VIDEO]
print('output_video_path:', output_video_path)
|
||||
|
||||
137
main.py
Normal file
137
main.py
Normal file
@@ -0,0 +1,137 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
from modelscope.pipelines import pipeline
|
||||
from modelscope.outputs import OutputKeys
|
||||
|
||||
|
||||
def safe_stem(text: str, maxlen: int = 60) -> str:
    """Turn a prompt into a filesystem-safe filename fragment.

    Runs of whitespace collapse to single underscores, any character outside
    ``[A-Za-z0-9_-]`` is dropped, the result is truncated to *maxlen*
    characters, and leading/trailing underscores are stripped.  An input that
    reduces to nothing falls back to the stem ``"image"``.
    """
    collapsed = re.sub(r"\s+", "_", text.strip())
    cleaned = re.sub(r"[^A-Za-z0-9_\-]+", "", collapsed)
    # Truncate first; fall back only when nothing survived the cleaning.
    stem = cleaned[:maxlen] if cleaned else "image"
    return stem.strip("_")
|
||||
|
||||
|
||||
def load_prompts(json_path: Path):
    """Load prompt configurations from a JSON file.

    Accepted top-level JSON structures:
      1) ``["prompt 1", "prompt 2", ...]`` — each string is wrapped as
         ``{"prompt": s}``;
      2) ``[{"prompt": ...}, ...]`` — objects are passed through unchanged.

    Raises ValueError when the top level is not a list, when the list mixes
    strings and objects, or when an object lacks the "prompt" field.
    """
    with open(json_path, "r", encoding="utf-8") as fh:
        payload = json.load(fh)

    if not isinstance(payload, list):
        raise ValueError("JSON 顶层必须是列表。")

    # A list of plain strings: wrap each one into a minimal config dict.
    if all(isinstance(item, str) for item in payload):
        return [{"prompt": item} for item in payload]

    # A list of objects: validate that every entry carries a prompt.
    if all(isinstance(item, dict) for item in payload):
        for entry in payload:
            if "prompt" not in entry:
                raise ValueError("每个对象都需要包含 'prompt' 字段")
        return list(payload)

    raise ValueError("JSON 列表元素需全为字符串或全为对象。")
|
||||
|
||||
|
||||
def build_pipeline(model_path: str, device: str = "cuda", dtype=torch.float16):
    """Create the ModelScope text-to-video-synthesis pipeline.

    Args:
        model_path: Local model directory or ModelScope model id.
        device: Inference device forwarded to the ModelScope ``pipeline``
            factory (e.g. "cuda" or "cpu").
        dtype: Requested torch dtype.  NOTE(review): the generic ModelScope
            ``pipeline`` factory exposes no dtype switch, so this parameter
            is currently not forwarded; it is kept for caller compatibility.

    Returns:
        A callable ModelScope pipeline object.
    """
    # Fix: forward the requested device instead of silently ignoring it
    # (previously both ``device`` and ``dtype`` were accepted and unused).
    return pipeline('text-to-video-synthesis', model_path, device=device)
|
||||
|
||||
|
||||
def generate_one(pipe, cfg: dict, out_dir: Path, index: int):
    """
    Generate one video from cfg and return (saved path, elapsed seconds, detail dict).

    Supported cfg fields:
    - prompt (required)
    """
    prompt = cfg["prompt"]
    # Timestamp in the filename keeps repeated runs from overwriting output.
    stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    stem = safe_stem(prompt)
    filename = f"{index:03d}_{stem}_{stamp}.mp4"
    out_path = out_dir / filename

    start = time.time()
    # The pipeline takes {"text": prompt}, writes the video to out_path, and
    # returns the written path under OutputKeys.OUTPUT_VIDEO.
    output_video_path = pipe({"text": prompt}, output_video=str(out_path))[OutputKeys.OUTPUT_VIDEO]
    elapsed = time.time() - start

    # Per-case record that main() aggregates into the results JSON.
    detail = {
        "index": index,
        "filename": filename,
        "elapsed_seconds": round(elapsed, 6),
        "prompt": prompt
    }
    return out_path, elapsed, detail
|
||||
|
||||
|
||||
def main():
    """CLI entry point: batch text-to-video generation plus a JSON timing report.

    Reads prompts from --json, generates one video per prompt into --outdir,
    and writes per-case and aggregate timings to --results.
    """
    parser = argparse.ArgumentParser(
        description="Stable Diffusion 基准与批量生成脚本(JSON 结果)"
    )
    parser.add_argument("--model", required=True, help="模型路径或模型名(本地目录或 HF 仓库名)")
    parser.add_argument("--json", required=True, help="测试文本 JSON 文件路径")
    parser.add_argument("--results", required=True, help="结果 JSON 文件输出路径(*.json)")
    parser.add_argument("--outdir", required=True, help="图片输出目录")
    parser.add_argument("--device", default="cuda", choices=["cuda", "cpu"], help="推理设备")
    parser.add_argument("--dtype", default="fp16", choices=["fp16", "fp32"], help="推理精度")
    args = parser.parse_args()

    model_path = args.model
    json_path = Path(args.json)
    results_path = Path(args.results)
    out_dir = Path(args.outdir)

    # Ensure both the video directory and the results file's parent exist.
    out_dir.mkdir(parents=True, exist_ok=True)
    results_path.parent.mkdir(parents=True, exist_ok=True)

    dtype = torch.float16 if args.dtype == "fp16" else torch.float32

    prompts = load_prompts(json_path)
    if not prompts:
        raise ValueError("测试列表为空。")

    pipe = build_pipeline(model_path=model_path, device=args.device, dtype=dtype)

    records = []
    total_start = time.time()
    for i, cfg in enumerate(prompts, 1):
        out_path, elapsed, detail = generate_one(pipe, cfg, out_dir, i)
        print(f"[{i}/{len(prompts)}] saved: {out_path.name} elapsed: {elapsed:.3f}s")
        records.append(detail)
    total_elapsed = round(time.time() - total_start, 6)
    avg_latency = total_elapsed / len(records) if records else 0

    # Aggregate result structure written as the results JSON.
    result_obj = {
        "timestamp": datetime.now().isoformat(timespec="seconds"),
        # NOTE(review): pipe may not expose .device; falls back to "unknown".
        "model": model_path,
        "device": str(getattr(pipe, "device", "unknown")),
        "dtype": "fp16" if dtype == torch.float16 else "fp32",
        "count": len(records),
        "total_elapsed_seconds": total_elapsed,
        "avg_latency": avg_latency,
        "cases": records
    }

    with open(results_path, "w", encoding="utf-8") as f:
        json.dump(result_obj, f, ensure_ascii=False, indent=2)

    # Fix: summary line previously printed the typo "vidoes".
    print(f"\nAll done. videos: {len(records)}, total_elapsed: {total_elapsed:.3f}s, avg_latency: {avg_latency:.3f}")
    print(f"Results JSON: {results_path}")
    print(f"Images dir : {out_dir.resolve()}")
|
||||
5
run_in_docker_mlu370.sh
Executable file
5
run_in_docker_mlu370.sh
Executable file
@@ -0,0 +1,5 @@
|
||||
#! /usr/bin/env bash
# Launch an interactive shell in the MLU370 image, mounting the workspace and
# /mnt, and mapping one Cambricon card (host card $device_id) into the
# container as cambricon_dev0 together with the control device.
# cnmon
image=harbor-contest.4pd.io/zhanghao/iic:mlu370
device_id=2
docker run -it -v /root/zhanghao:/workspace -v /mnt:/mnt --device=/dev/cambricon_dev$device_id:/dev/cambricon_dev0 --device=/dev/cambricon_ctl:/dev/cambricon_ctl $image bash
|
||||
1
test.sh
Executable file
1
test.sh
Executable file
@@ -0,0 +1 @@
|
||||
# Batch-generate videos for every prompt in dataset.json and write timing
# results to results.json (videos land in ./output).
python3 main.py --model "/mnt/contest_ceph/zhanghao/models/iic/text-to-video-synthesis" --json "dataset.json" --results "results.json" --outdir "output" --device cuda --dtype fp16
|
||||
BIN
whls/cambricon_pytorch_lightning-2.5.0+mlu0.7.0-py3-none-any.whl
(Stored with Git LFS)
Normal file
BIN
whls/cambricon_pytorch_lightning-2.5.0+mlu0.7.0-py3-none-any.whl
(Stored with Git LFS)
Normal file
Binary file not shown.
Reference in New Issue
Block a user