init
This commit is contained in:
0
transformers/utils/test_module/__init__.py
Normal file
0
transformers/utils/test_module/__init__.py
Normal file
9
transformers/utils/test_module/custom_configuration.py
Normal file
9
transformers/utils/test_module/custom_configuration.py
Normal file
@@ -0,0 +1,9 @@
|
||||
from transformers import PretrainedConfig
|
||||
|
||||
|
||||
class CustomConfig(PretrainedConfig):
    """Minimal configuration class used to exercise dynamic-module loading.

    Carries a single extra field, ``attribute``, on top of the base config.
    """

    model_type = "custom"

    def __init__(self, attribute=1, **kwargs):
        """Store ``attribute``, then hand every remaining kwarg to the base class."""
        # Set our own field first; the base __init__ consumes the rest of kwargs.
        self.attribute = attribute
        super().__init__(**kwargs)
|
||||
@@ -0,0 +1,5 @@
|
||||
from transformers import Wav2Vec2FeatureExtractor
|
||||
|
||||
|
||||
class CustomFeatureExtractor(Wav2Vec2FeatureExtractor):
    """Feature extractor identical to Wav2Vec2's, exposed under a custom name."""
|
||||
@@ -0,0 +1,5 @@
|
||||
from transformers import CLIPImageProcessor
|
||||
|
||||
|
||||
class CustomImageProcessor(CLIPImageProcessor):
    """Image processor identical to CLIP's, exposed under a custom name."""
|
||||
19
transformers/utils/test_module/custom_modeling.py
Normal file
19
transformers/utils/test_module/custom_modeling.py
Normal file
@@ -0,0 +1,19 @@
|
||||
import torch
|
||||
|
||||
from transformers import PreTrainedModel
|
||||
|
||||
from .custom_configuration import CustomConfig
|
||||
|
||||
|
||||
class CustomModel(PreTrainedModel):
    """Tiny model wrapping a single square linear layer, for dynamic-module tests."""

    config_class = CustomConfig

    def __init__(self, config):
        super().__init__(config)
        # One hidden_size -> hidden_size projection; the size comes from the config.
        self.linear = torch.nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, x):
        """Return the linear projection of ``x``."""
        return self.linear(x)

    def _init_weights(self, module):
        # Deliberately a no-op: the tests don't depend on any particular init.
        pass
|
||||
33
transformers/utils/test_module/custom_pipeline.py
Normal file
33
transformers/utils/test_module/custom_pipeline.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import numpy as np
|
||||
|
||||
from transformers import Pipeline
|
||||
|
||||
|
||||
def softmax(outputs):
    """Numerically stable softmax over the last axis of ``outputs``."""
    # Subtracting the per-row max keeps np.exp from overflowing on large logits.
    row_max = outputs.max(axis=-1, keepdims=True)
    exps = np.exp(outputs - row_max)
    return exps / np.sum(exps, axis=-1, keepdims=True)
|
||||
|
||||
|
||||
class PairClassificationPipeline(Pipeline):
    """Pipeline that classifies a pair of texts with a sequence-classification model."""

    def _sanitize_parameters(self, **kwargs):
        """Route ``second_text`` to preprocess; forward/postprocess take no params."""
        preprocess_kwargs = {key: kwargs[key] for key in ("second_text",) if key in kwargs}
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        """Tokenize the text pair into framework tensors."""
        return self.tokenizer(text, text_pair=second_text, return_tensors="pt")

    def _forward(self, model_inputs):
        """Run the model on the tokenized inputs."""
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        """Turn raw logits into a {label, score, logits} dict."""
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        return {"label": label, "score": score, "logits": logits.tolist()}
|
||||
6
transformers/utils/test_module/custom_processing.py
Normal file
6
transformers/utils/test_module/custom_processing.py
Normal file
@@ -0,0 +1,6 @@
|
||||
from transformers import ProcessorMixin
|
||||
|
||||
|
||||
class CustomProcessor(ProcessorMixin):
    """Processor combining an auto-resolved feature extractor and tokenizer."""

    # Class names as strings: ProcessorMixin resolves them via the Auto classes.
    feature_extractor_class = "AutoFeatureExtractor"
    tokenizer_class = "AutoTokenizer"
|
||||
5
transformers/utils/test_module/custom_tokenization.py
Normal file
5
transformers/utils/test_module/custom_tokenization.py
Normal file
@@ -0,0 +1,5 @@
|
||||
from transformers import BertTokenizer
|
||||
|
||||
|
||||
class CustomTokenizer(BertTokenizer):
    """Slow tokenizer identical to BertTokenizer, exposed under a custom name."""
|
||||
@@ -0,0 +1,8 @@
|
||||
from transformers import BertTokenizerFast
|
||||
|
||||
from .custom_tokenization import CustomTokenizer
|
||||
|
||||
|
||||
class CustomTokenizerFast(BertTokenizerFast):
    """Fast tokenizer identical to BertTokenizerFast, paired with CustomTokenizer.

    Fix: dropped the redundant ``pass`` statement — a class body containing any
    statement (here the ``slow_tokenizer_class`` assignment) needs no ``pass``.
    """

    # Used when converting this fast tokenizer back to its slow counterpart.
    slow_tokenizer_class = CustomTokenizer
|
||||
@@ -0,0 +1,5 @@
|
||||
from transformers import LlavaOnevisionVideoProcessor
|
||||
|
||||
|
||||
class CustomVideoProcessor(LlavaOnevisionVideoProcessor):
    """Video processor identical to LlavaOnevisionVideoProcessor, under a custom name."""
|
||||
Reference in New Issue
Block a user