add ascend support
This commit is contained in:
5
Dockerfile.ascend
Normal file
5
Dockerfile.ascend
Normal file
@@ -0,0 +1,5 @@
|
||||
# Ascend NPU runtime image for the diffusers benchmark.
# Base image ships torch + torch_npu preconfigured for Ascend devices.
FROM quay.io/ascend/vllm-ascend:v0.10.0rc1

WORKDIR /workspace

# Pin diffusers for reproducibility; --no-cache-dir keeps the layer small.
RUN pip install --no-cache-dir diffusers==0.34.0

# Benchmark entry point and its prompt dataset.
COPY main.py dataset.json /workspace/
|
||||
24
main.py
24
main.py
@@ -54,8 +54,12 @@ def build_pipeline(model_path: str, device: str = "cuda", dtype=torch.float16):
|
||||
use_safetensors=True,
|
||||
)
|
||||
# 设备放置
|
||||
if device == "cuda" and torch.cuda.is_available():
|
||||
pipe.to("cuda")
|
||||
if device == "cuda":
|
||||
if torch.cuda.is_available():
|
||||
pipe.to("cuda")
|
||||
elif torch.npu.is_available():
|
||||
pipe.to("npu")
|
||||
|
||||
try:
|
||||
pipe.enable_attention_slicing()
|
||||
except Exception:
|
||||
@@ -92,17 +96,17 @@ def generate_one(pipe: DiffusionPipeline, cfg: dict, out_dir: Path, index: int):
|
||||
height = cfg.get("height", None)
|
||||
|
||||
# 随机数生成器(与管线设备一致)
|
||||
gen = None
|
||||
try:
|
||||
device_str = str(getattr(pipe, "device", "cuda" if torch.cuda.is_available() else "cpu"))
|
||||
except Exception:
|
||||
device_str = "cuda" if torch.cuda.is_available() else "cpu"
|
||||
if seed is not None:
|
||||
gen = torch.Generator(device=device_str).manual_seed(int(seed))
|
||||
# gen = None
|
||||
# try:
|
||||
# device_str = str(getattr(pipe, "device", "cuda" if torch.cuda.is_available() else "npu" if torch.npu.is_available() else "cpu"))
|
||||
# except Exception:
|
||||
# device_str = "cuda" if torch.cuda.is_available() else "npu" if torch.npu.is_available() else "cpu"
|
||||
# if seed is not None:
|
||||
# gen = torch.Generator(device=device_str).manual_seed(int(seed))
|
||||
|
||||
call_kwargs = dict(
|
||||
prompt=prompt,
|
||||
generator=gen,
|
||||
# generator=gen,
|
||||
)
|
||||
if width is not None and height is not None:
|
||||
call_kwargs.update({"width": int(width), "height": int(height)})
|
||||
|
||||
4
run_in_docker_ascend.sh
Executable file
4
run_in_docker_ascend.sh
Executable file
@@ -0,0 +1,4 @@
|
||||
#!/usr/bin/env bash
# Launch an interactive shell in the Ascend diffusers image with one host NPU
# passed through. Requires the Ascend driver stack installed on the host.
set -euo pipefail

image=harbor-contest.4pd.io/zhanghao/diffusers:ascend-0.2
device=1  # host NPU index to expose; appears inside the container as davinci0

# BUG FIX: ASCEND_VISIBLE_DEVICES was hard-coded to 1 while the device-node
# mapping used $device — changing $device would desynchronize them. Both now
# derive from $device.
docker run -it \
    -v "$(pwd)":/host \
    -e ASCEND_VISIBLE_DEVICES="$device" \
    --device /dev/davinci"$device":/dev/davinci0 \
    --device /dev/davinci_manager \
    --device /dev/devmm_svm \
    --device /dev/hisi_hdc \
    -v /mnt:/mnt \
    -v /usr/local/dcmi:/usr/local/dcmi \
    -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
    -v /usr/local/Ascend/driver/lib64/:/usr/local/Ascend/driver/lib64/ \
    -v /usr/local/Ascend/driver/version.info:/usr/local/Ascend/driver/version.info \
    -v /etc/ascend_install.info:/etc/ascend_install.info \
    --privileged \
    --entrypoint bash \
    "$image"
|
||||
5
test.py
5
test.py
@@ -5,9 +5,10 @@ import time
|
||||
model_path = "/mnt/contest_ceph/zhanghao/models/stable-diffusion-v1-5"
# model_path = "/mnt/contest_ceph/zhanghao/models/stable-diffusion-3.5-medium"

pipeline = DiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16)

# Pick the best available accelerator. NOTE: `torch.npu` only exists when the
# torch_npu (Ascend) plugin is installed; on stock PyTorch the original
# chained expression `... else "npu" if torch.npu.is_available() else "cpu"`
# raised AttributeError before ever reaching the CPU fallback. Guarding with
# hasattr makes the fallback actually reachable.
if torch.cuda.is_available():
    device = "cuda"
elif hasattr(torch, "npu") and torch.npu.is_available():
    device = "npu"
else:
    device = "cpu"
pipeline.to(device)

# Time a single end-to-end generation as a smoke test / rough benchmark.
start = time.time()
image = pipeline("An image of a squirrel in Picasso style").images[0]
end = time.time()
print(f"elapsed: {end - start}")
image.save("squirrel_picasso.png")
|
||||
|
||||
Reference in New Issue
Block a user