diff --git a/examples/offline_embed.py b/examples/offline_embed.py
index 9b42587..91fba38 100644
--- a/examples/offline_embed.py
+++ b/examples/offline_embed.py
@@ -18,11 +18,13 @@
 #
 
 import os
+
 os.environ["VLLM_USE_MODELSCOPE"] = "True"
 
 import torch
 
 from vllm import LLM
+
 
 def get_detailed_instruct(task_description: str, query: str) -> str:
     return f'Instruct: {task_description}\nQuery:{query}'