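"""Custom handler following the ``EndpointHandler`` pattern used by Hugging Face
Inference Endpoints: load a causal LM once at startup, then serve
text-generation requests of the form ``{"inputs": ..., "parameters": {...}}``.
"""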
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


class EndpointHandler:
    def __init__(self, path: str = ""):
        # Inference Endpoints mounts the model repository at /repository
        model_dir = path or "/repository"

        self.tokenizer = AutoTokenizer.from_pretrained(
            model_dir,
            trust_remote_code=True,
        )

        # Ensure a pad token exists for generation (many causal LMs ship without one)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

        self.model = AutoModelForCausalLM.from_pretrained(
            model_dir,
            trust_remote_code=True,
            torch_dtype=torch.float16,
            low_cpu_mem_usage=True,
            device_map="auto",  # shard across available GPUs, fall back to CPU
        )
        self.model.eval()

    def __call__(self, data: dict) -> dict:
        inputs = data.get("inputs", "")
        params = data.get("parameters", {}) or {}

        max_new_tokens = int(params.get("max_new_tokens", 128))
        temperature = float(params.get("temperature", 0.0))
        top_p = float(params.get("top_p", 1.0))
        # Greedy decoding by default; sample only if explicitly requested
        # or if a positive temperature is supplied
        do_sample = bool(params.get("do_sample", temperature > 0))

        # Accept either a plain string prompt or chat-style messages
        if isinstance(inputs, list):
            prompt = self.tokenizer.apply_chat_template(
                inputs,
                tokenize=False,
                add_generation_prompt=True,
            )
        else:
            prompt = str(inputs)

        enc = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)

        gen_kwargs = {
            "max_new_tokens": max_new_tokens,
            "do_sample": do_sample,
            "pad_token_id": self.tokenizer.pad_token_id,
            "eos_token_id": self.tokenizer.eos_token_id,
        }
        # Pass sampling parameters only when sampling; temperature=0.0 alongside
        # do_sample=False triggers a warning in recent transformers releases
        if do_sample:
            gen_kwargs["temperature"] = temperature
            gen_kwargs["top_p"] = top_p

        with torch.no_grad():
            out = self.model.generate(**enc, **gen_kwargs)

        # Slice off the prompt so only newly generated tokens are returned
        generated_ids = out[0][enc["input_ids"].shape[-1]:]
        text = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        return {"generated_text": text}
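

# Minimal local smoke test: a sketch, not part of the Endpoints contract.
# "sshleifer/tiny-gpt2" is an arbitrary small public model used purely for
# illustration; best run on a CUDA machine, since the handler loads weights
# in float16.
if __name__ == "__main__":
    handler = EndpointHandler(path="sshleifer/tiny-gpt2")

    # Plain string prompt
    print(handler({"inputs": "Hello, world", "parameters": {"max_new_tokens": 8}}))

    # Chat-style messages require the tokenizer to define a chat template,
    # which tiny-gpt2 does not, so a failure is expected on this branch.
    try:
        print(handler({"inputs": [{"role": "user", "content": "Say hi."}]}))
    except Exception as exc:
        print(f"chat template not available: {exc}")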