# Export metadata (was bare text, which is invalid Python — kept as a comment):
# Files — 2026-02-04 17:22:39 +08:00 — 35 lines, 901 B, Python.
from vllm import LLM, SamplingParams
from PIL import Image
from dataclasses import dataclass
from typing import Literal
@dataclass(frozen=True)
class ImageAssetLocal:
    """A locally stored CI test image, addressed by asset name."""

    # Only the two assets shipped under tools/ci/ci_files/ are valid.
    name: Literal["stop_sign", "cherry_blossom"]

    @property
    def pil_image(self) -> Image.Image:
        """Open the asset from the repo-local CI files directory as a PIL image."""
        asset_path = f"tools/ci/ci_files/{self.name}.jpg"
        return Image.open(asset_path)
def run_llava(
    model_path: str = "/data/AE/llm/models/llava-1.5-7b-hf/",
    max_tokens: int = 100,
) -> None:
    """Run one multimodal generation with a local LLaVA-1.5-7B checkpoint.

    Loads the model, feeds it the local ``stop_sign`` test image together
    with a fixed question prompt, and prints each generated answer.

    Args:
        model_path: Filesystem path of the LLaVA checkpoint to load.
            Defaults to the previously hard-coded location.
        max_tokens: Generation length cap for ``SamplingParams``.
    """
    llm = LLM(model=model_path)
    sampling_params = SamplingParams(max_tokens=max_tokens)
    # LLaVA-1.5 chat template: "<image>" marks where the vision features
    # are spliced into the token stream.
    prompt = "USER: <image>\nWhat is the content of this image?\nASSISTANT:"
    image = ImageAssetLocal("stop_sign").pil_image
    outputs = llm.generate(
        {
            "prompt": prompt,
            "multi_modal_data": {"image": image},
        },
        sampling_params=sampling_params,
    )
    for output in outputs:
        print(output.outputs[0].text)
# Script entry point: run the demo only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    run_llava()