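"""Custom text-generation handler, following the Hugging Face Inference
Endpoints convention of an `EndpointHandler` class in `handler.py` that is
constructed with the model directory and called with the request payload:

    {"inputs": "...", "parameters": {"max_new_tokens": 64, ...}}

Chat-style requests may additionally pass a `messages` list under
`parameters`.
"""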
from typing import Any, Dict

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


class EndpointHandler:
    def __init__(self, path: str = ""):
        self.tokenizer = AutoTokenizer.from_pretrained(path)

        # Many causal LM tokenizers ship without a pad token; reuse EOS so
        # that padding is well defined.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

        # fp16 on GPU, fp32 on CPU. device_map="auto" lets accelerate place
        # the weights (requires the `accelerate` package).
        self.model = AutoModelForCausalLM.from_pretrained(
            path,
            device_map="auto",
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        )
        self.model.eval()

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        inputs = data.get("inputs", "")
        parameters = data.get("parameters", {}) or {}

        # "inputs" may be a single string or a list of strings; join lists
        # into one newline-separated prompt.
        if isinstance(inputs, list):
            prompt = "\n".join(str(x) for x in inputs)
        else:
            prompt = str(inputs)

        # Generation parameters, with sampling enabled by default.
        max_new_tokens = int(parameters.get("max_new_tokens", 256))
        temperature = float(parameters.get("temperature", 0.7))
        top_p = float(parameters.get("top_p", 0.9))
        do_sample = bool(parameters.get("do_sample", True))

        # Chat-style requests: prefer the tokenizer's chat template when one
        # is available, otherwise fall back to a plain "role: content" join.
        messages = parameters.get("messages")
        if messages and isinstance(messages, list):
            if hasattr(self.tokenizer, "apply_chat_template"):
                formatted_prompt = self.tokenizer.apply_chat_template(
                    messages,
                    tokenize=False,
                    add_generation_prompt=True,
                )
            else:
                formatted_prompt = "\n".join(
                    f"{m.get('role', 'user')}: {m.get('content', '')}" for m in messages
                )
        else:
            formatted_prompt = prompt

        model_inputs = self.tokenizer(
            formatted_prompt,
            return_tensors="pt",
            padding=True,
            truncation=True,
        )

        # Move input tensors to wherever device_map placed the model.
        model_inputs = {k: v.to(self.model.device) for k, v in model_inputs.items()}

        with torch.no_grad():
            outputs = self.model.generate(
                **model_inputs,
                max_new_tokens=max_new_tokens,
                temperature=temperature,
                top_p=top_p,
                do_sample=do_sample,
                pad_token_id=self.tokenizer.pad_token_id,
                eos_token_id=self.tokenizer.eos_token_id,
            )

        # Strip the prompt by slicing off the input tokens before decoding;
        # comparing decoded strings is fragile because tokenization does not
        # always round-trip the prompt text exactly.
        prompt_length = model_inputs["input_ids"].shape[-1]
        generated_text = self.tokenizer.decode(
            outputs[0][prompt_length:],
            skip_special_tokens=True,
        ).strip()

        return {"generated_text": generated_text}
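
# A minimal local smoke test, not part of the Inference Endpoints contract.
# The path and prompt below are placeholders for illustration only.
if __name__ == "__main__":
    handler = EndpointHandler(path=".")  # assumes the model files live here
    result = handler(
        {
            "inputs": "Write a haiku about the sea.",
            "parameters": {"max_new_tokens": 64, "temperature": 0.8},
        }
    )
    print(result["generated_text"])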