commit 48d108dd0d51baa40fd06dcf96d9f6bd8d2d0225
Author: ZHANG Hao
Date:   Tue Sep 2 16:54:16 2025 +0800

    support iic/text-to-video-synthesis

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..aba2f10
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,4 @@
+*.whl filter=lfs diff=lfs merge=lfs -text
+*.a filter=lfs diff=lfs merge=lfs -text
+*.mp4 filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..520b647
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,11 @@
+FROM git.modelhub.org.cn:9443/enginex-cambricon/mlu370-pytorch:v25.01-torch2.5.0-torchmlu1.24.1-ubuntu22.04-py310
+
+WORKDIR /workspace
+ENV PATH=/torch/venv3/pytorch_infer/bin:/workspace/ffmpeg-mlu-v4.2.0/install/bin:/usr/local/neuware/bin:/usr/local/openmpi/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+RUN pip install diffusers==0.34.0
+RUN pip install open_clip_torch==2.24.0 sortedcontainers modelscope av==11.0.0
+RUN sed -i 's|source /torch/venv3/pytorch/bin/activate|source /torch/venv3/pytorch_infer/bin/activate|' /root/.bashrc
+
+COPY . /workspace/
+RUN pip install whls/cambricon_pytorch_lightning-2.5.0+mlu0.7.0-py3-none-any.whl
diff --git a/dataset.json b/dataset.json
new file mode 100644
index 0000000..135bd87
--- /dev/null
+++ b/dataset.json
@@ -0,0 +1,4 @@
+[
+    "An image of a squirrel in Picasso style",
+    "A cozy cabin in the woods, watercolor painting"
+]
\ No newline at end of file
diff --git a/iic.py b/iic.py
new file mode 100644
index 0000000..04e30fd
--- /dev/null
+++ b/iic.py
@@ -0,0 +1,14 @@
+import os
+
+from modelscope.pipelines import pipeline
+from modelscope.outputs import OutputKeys
+
+model_path = "/mnt/contest_ceph/zhanghao/models/iic/text-to-video-synthesis"
+p = pipeline('text-to-video-synthesis', model_path)
+
+test_text = {
+    'text': 'A panda eating bamboo on a rock.',
+}
+output_video_path = p(test_text, output_video='./output.mp4')[OutputKeys.OUTPUT_VIDEO]
+print('output_video_path:', output_video_path)
+
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..7a4d8e6
--- /dev/null
+++ b/main.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import argparse
+import json
+import os
+import re
+import time
+from datetime import datetime
+from pathlib import Path
+
+import torch
+from modelscope.pipelines import pipeline
+from modelscope.outputs import OutputKeys
+
+
+def safe_stem(text: str, maxlen: int = 60) -> str:
+    """Turn a prompt into a filesystem-safe filename fragment."""
+    text = re.sub(r"\s+", "_", text.strip())
+    text = re.sub(r"[^A-Za-z0-9_\-]+", "", text)
+    return (text[:maxlen] or "image").strip("_")
+
+
+def load_prompts(json_path: Path):
+    """
+    Supported JSON layouts:
+    1) ["prompt 1", "prompt 2", ...] or 2) [{"prompt": "..."}, ...]
+    """
+    with open(json_path, "r", encoding="utf-8") as f:
+        data = json.load(f)
+
+    prompts = []
+    if isinstance(data, list):
+        if all(isinstance(x, str) for x in data):
+            for s in data:
+                prompts.append({"prompt": s})
+        elif all(isinstance(x, dict) for x in data):
+            for obj in data:
+                if "prompt" not in obj:
+                    raise ValueError("每个对象都需要包含 'prompt' 字段")
+                prompts.append(obj)
+        else:
+            raise ValueError("JSON 列表元素需全为字符串或全为对象。")
+    else:
+        raise ValueError("JSON 顶层必须是列表。")
+    return prompts
+
+
+def build_pipeline(model_path: str, device: str = "cuda", dtype=torch.float16):
+    pipe = pipeline('text-to-video-synthesis', model_path)  # NOTE(review): device/dtype are accepted but not forwarded — confirm intended
+    return pipe
+
+
+def generate_one(pipe, cfg: dict, out_dir: Path, index: int):
+    """
+    Generate one video from cfg; return (saved path, elapsed seconds, detail dict).
+    Supported fields:
+    - prompt (required)
+    """
+    prompt = cfg["prompt"]
+    stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
+    stem = safe_stem(prompt)
+    filename = f"{index:03d}_{stem}_{stamp}.mp4"
+    out_path = out_dir / filename
+
+    start = time.time()
+    output_video_path = pipe({"text": prompt}, output_video=str(out_path))[OutputKeys.OUTPUT_VIDEO]
+    elapsed = time.time() - start
+
+    detail = {
+        "index": index,
+        "filename": filename,
+        "elapsed_seconds": round(elapsed, 6),
+        "prompt": prompt
+    }
+    return out_path, elapsed, detail
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Text-to-video 基准与批量生成脚本(JSON 结果)"
+    )
+    parser.add_argument("--model", required=True, help="模型路径或模型名(本地目录或 HF 仓库名)")
+    parser.add_argument("--json", required=True, help="测试文本 JSON 文件路径")
+    parser.add_argument("--results", required=True, help="结果 JSON 文件输出路径(*.json)")
+    parser.add_argument("--outdir", required=True, help="图片输出目录")
+    parser.add_argument("--device", default="cuda", choices=["cuda", "cpu"], help="推理设备")
+    parser.add_argument("--dtype", default="fp16", choices=["fp16", "fp32"], help="推理精度")
+    args = parser.parse_args()
+
+    model_path = args.model
+    json_path = Path(args.json)
+    results_path = Path(args.results)
+    out_dir = Path(args.outdir)
+
+    out_dir.mkdir(parents=True, exist_ok=True)
+    results_path.parent.mkdir(parents=True, exist_ok=True)
+
+    dtype = torch.float16 if args.dtype == "fp16" else torch.float32
+
+    prompts = load_prompts(json_path)
+    if not prompts:
+        raise ValueError("测试列表为空。")
+
+    pipe = build_pipeline(model_path=model_path, device=args.device, dtype=dtype)
+
+    records = []
+    total_start = time.time()
+    for i, cfg in enumerate(prompts, 1):
+        out_path, elapsed, detail = generate_one(pipe, cfg, out_dir, i)
+        print(f"[{i}/{len(prompts)}] saved: {out_path.name} elapsed: {elapsed:.3f}s")
+        records.append(detail)
+    total_elapsed = round(time.time() - total_start, 6)
+    avg_latency = total_elapsed / len(records) if records else 0
+
+    # Result JSON structure
+    result_obj = {
+        "timestamp": datetime.now().isoformat(timespec="seconds"),
+        "model": model_path,
+        "device": str(getattr(pipe, "device", "unknown")),
+        "dtype": "fp16" if dtype == torch.float16 else "fp32",
+        "count": len(records),
+        "total_elapsed_seconds": total_elapsed,
+        "avg_latency": avg_latency,
+        "cases": records
+    }
+
+    with open(results_path, "w", encoding="utf-8") as f:
+        json.dump(result_obj, f, ensure_ascii=False, indent=2)
+
+    print(f"\nAll done. videos: {len(records)}, total_elapsed: {total_elapsed:.3f}s, avg_latency: {avg_latency:.3f}")
+    print(f"Results JSON: {results_path}")
+    print(f"Videos dir : {out_dir.resolve()}")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/run_in_docker_mlu370.sh b/run_in_docker_mlu370.sh
new file mode 100755
index 0000000..09b540f
--- /dev/null
+++ b/run_in_docker_mlu370.sh
@@ -0,0 +1,5 @@
+#! /usr/bin/env bash
+# cnmon
+image=harbor-contest.4pd.io/zhanghao/iic:mlu370
+device_id=2
+docker run -it -v /root/zhanghao:/workspace -v /mnt:/mnt --device=/dev/cambricon_dev$device_id:/dev/cambricon_dev0 --device=/dev/cambricon_ctl:/dev/cambricon_ctl $image bash
diff --git a/test.sh b/test.sh
new file mode 100755
index 0000000..e342652
--- /dev/null
+++ b/test.sh
@@ -0,0 +1 @@
+python3 main.py --model "/mnt/contest_ceph/zhanghao/models/iic/text-to-video-synthesis" --json "dataset.json" --results "results.json" --outdir "output" --device cuda --dtype fp16
diff --git a/whls/cambricon_pytorch_lightning-2.5.0+mlu0.7.0-py3-none-any.whl b/whls/cambricon_pytorch_lightning-2.5.0+mlu0.7.0-py3-none-any.whl
new file mode 100644
index 0000000..93593cc
--- /dev/null
+++ b/whls/cambricon_pytorch_lightning-2.5.0+mlu0.7.0-py3-none-any.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:281f8517b0798e664057194d158f6810b430db2265c660a0200addb1c1179251
+size 1631044