Forked from EngineX-Cambricon/enginex-mlu370-vllm
Add Qwen3 support
This commit is contained in:
34
vllm-v0.6.2/examples/llava_example.py
Normal file
34
vllm-v0.6.2/examples/llava_example.py
Normal file
@@ -0,0 +1,34 @@
|
||||
from vllm import LLM, SamplingParams
|
||||
from PIL import Image
|
||||
from dataclasses import dataclass
|
||||
from typing import Literal
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class ImageAssetLocal:
    """Immutable handle to a bundled CI test image, loadable as a PIL image.

    Only the two assets named below are expected to exist under
    ``tools/ci/ci_files/`` (relative to the current working directory).
    """

    # Restricted to the asset files shipped with the repo's CI data.
    name: Literal["stop_sign", "cherry_blossom"]

    @property
    def pil_image(self) -> Image.Image:
        """Open and return the asset's JPEG from the CI files directory."""
        asset_path = f"tools/ci/ci_files/{self.name}.jpg"
        return Image.open(asset_path)
|
||||
|
||||
|
||||
def run_llava():
    """Run one LLaVA-1.5 multimodal generation over the local stop-sign image.

    Loads the model from a fixed local path, submits a single image+text
    prompt with a 100-token generation budget, and prints each completion.
    """
    # NOTE(review): model path is machine-specific — assumes the weights are
    # pre-downloaded to this location.
    llm = LLM(model="/data/AE/llm/models/llava-1.5-7b-hf/")
    sampling_params = SamplingParams(max_tokens=100)

    prompt = "USER: <image>\nWhat is the content of this image?\nASSISTANT:"
    image = ImageAssetLocal("stop_sign").pil_image

    # Single multimodal request: the image is attached via multi_modal_data.
    request = {
        "prompt": prompt,
        "multi_modal_data": {"image": image},
    }
    outputs = llm.generate(request, sampling_params=sampling_params)

    for output in outputs:
        print(output.outputs[0].text)
|
||||
|
||||
|
||||
# Entry point: run the LLaVA example when executed as a script.
if __name__ == "__main__":
    run_llava()
|
||||
Reference in New Issue
Block a user