from typing import Dict, List, Any

import torch
import torch.nn.functional as F
from torch import Tensor
from transformers import AutoTokenizer, AutoModel


def average_pool(last_hidden_states: Tensor,
                 attention_mask: Tensor) -> Tensor:
    # Zero out hidden states at padded positions, then mean-pool over the sequence dimension.
    last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)
    return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]


class EndpointHandler:
    def __init__(self, path=""):
        # Load the tokenizer and encoder weights from the repository path provided by the endpoint.
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.model = AutoModel.from_pretrained(path)

    def __call__(self, data: Dict[str, Any]) -> List[List[float]]:
        # Accept either {"inputs": ...} or the raw payload itself.
        inputs = data.pop("inputs", data)

        # Tokenize a string or list of strings, truncating to the model's 512-token limit.
        batch_dict = self.tokenizer(inputs, max_length=512, padding=True, truncation=True, return_tensors='pt')

        # Run the encoder without tracking gradients.
        with torch.no_grad():
            outputs = self.model(**batch_dict)

        # Mean-pool the last hidden states, then L2-normalize to unit-length sentence embeddings.
        embeddings = average_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
        embeddings = F.normalize(embeddings, p=2, dim=1).tolist()

        return embeddings
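

# --- Local usage sketch (hypothetical, not part of the handler contract) ---
# A minimal smoke test assuming a compatible embedding checkpoint has been
# downloaded to `model_dir`; on Inference Endpoints the runtime instantiates
# EndpointHandler itself and routes request payloads to __call__.
if __name__ == "__main__":
    model_dir = "."  # placeholder: path to the local model repository
    handler = EndpointHandler(path=model_dir)
    vectors = handler({"inputs": ["first sentence", "second sentence"]})
    print(len(vectors), len(vectors[0]))  # batch size and embedding dimension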