Initialize the project; model provided by the ModelHub XC community
Model: ClaudiaIoana550/try2_deploy_falcon · Source: Original Platform
.gitattributes (vendored, new file, 35 lines)
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
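These patterns route large binaries through Git LFS while small text files stay in plain git. A rough check of which of this commit's files fall under them (a sketch; `fnmatch` only approximates gitattributes glob semantics):

```python
import fnmatch

# A subset of the patterns copied from the .gitattributes entries above.
lfs_patterns = ["*.7z", "*.bin", "*.h5", "*.onnx", "*.pt", "*.pth",
                "*.safetensors", "saved_model/**/*", "*.tar", "*.zip", "*tfevents*"]

def routed_through_lfs(path: str) -> bool:
    """Return True if git would hand `path` to LFS under the patterns above."""
    return any(fnmatch.fnmatch(path, pat) for pat in lfs_patterns)

# The weight shards go through LFS; the JSON files (even the huge tokenizer.json) do not.
for name in ["pytorch_model-00001-of-00002.bin", "config.json", "tokenizer.json"]:
    print(name, routed_through_lfs(name))
```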
config.json (new file, 33 lines)
@@ -0,0 +1,33 @@
{
  "alibi": false,
  "apply_residual_connection_post_layernorm": false,
  "architectures": [
    "FalconForCausalLM"
  ],
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_falcon.FalconConfig",
    "AutoModel": "modeling_falcon.FalconModel",
    "AutoModelForSequenceClassification": "modeling_falcon.FalconForSequenceClassification",
    "AutoModelForTokenClassification": "modeling_falcon.FalconForTokenClassification",
    "AutoModelForQuestionAnswering": "modeling_falcon.FalconForQuestionAnswering",
    "AutoModelForCausalLM": "modeling_falcon.FalconForCausalLM"
  },
  "bias": false,
  "bos_token_id": 11,
  "eos_token_id": 11,
  "hidden_dropout": 0.0,
  "hidden_size": 4544,
  "initializer_range": 0.02,
  "layer_norm_epsilon": 1e-05,
  "model_type": "falcon",
  "multi_query": true,
  "new_decoder_architecture": false,
  "num_attention_heads": 71,
  "num_hidden_layers": 32,
  "parallel_attn": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.27.4",
  "use_cache": true,
  "vocab_size": 65024
}
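Because the `auto_map` above points at classes defined in this repo, loading the config requires opting in to remote code. A minimal sketch, assuming Hub access and that you accept running the repo's code:

```python
from transformers import AutoConfig

# trust_remote_code=True is required because FalconConfig lives in
# configuration_falcon.py inside this repo (see auto_map above).
config = AutoConfig.from_pretrained(
    "ClaudiaIoana550/try2_deploy_falcon", trust_remote_code=True
)
print(config.model_type)    # "falcon"
print(config.hidden_size)   # 4544
print(config.hidden_size // config.num_attention_heads)  # 64, the per-head dimension
```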
configuration_falcon.py (new file, 152 lines)
@@ -0,0 +1,152 @@
# coding=utf-8
# Copyright 2023 the Falcon authors and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Falcon configuration"""
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`FalconModel`]. It is used to instantiate a Falcon
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the
    [tiiuae/falcon-7b](https://huggingface.co/tiiuae/falcon-7b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 65024):
            Vocabulary size of the Falcon model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`FalconModel`]
        hidden_size (`int`, *optional*, defaults to 4544):
            Dimension of the hidden representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 71):
            Number of attention heads for each attention layer in the Transformer encoder.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether the model should return the last key/values attentions (not used by all models). Only relevant if
            `config.is_decoder=True`.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        hidden_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for MLP layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for attention layers.
        num_kv_heads (`int`, *optional*):
            Number of key-value heads to use per attention layer. If unset, defaults to the same value as
            `num_attention_heads`.
        alibi (`bool`, *optional*, defaults to `False`):
            Whether to use ALiBi positional biases during self-attention.
        new_decoder_architecture (`bool`, *optional*, defaults to `False`):
            Whether to use the new (Falcon-40B) decoder architecture. If `True`, the `multi_query` and `parallel_attn`
            arguments are ignored, as the new decoder always uses parallel attention.
        multi_query (`bool`, *optional*, defaults to `True`):
            Whether to use multi-query attention in the decoder. Ignored when `new_decoder_architecture` is `True`.
        parallel_attn (`bool`, *optional*, defaults to `True`):
            Whether to compute attention in parallel with the feedforward layer. If False, they are consecutive
            instead, as in the original Transformer architecture. Ignored when `new_decoder_architecture` is `True`.
        bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias on Linear layers.
        bos_token_id (`int`, *optional*, defaults to 11):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 11):
            The id of the "end-of-sequence" token.

    Example:

    ```python
    >>> from transformers import FalconModel, FalconConfig

    >>> # Initializing a small (2-layer) Falcon configuration
    >>> configuration = FalconConfig(num_hidden_layers=2)

    >>> # Initializing a model from the small configuration
    >>> model = FalconModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        logger.warning_once(
            "\nWARNING: You are currently loading Falcon using legacy code contained in the model repository. Falcon has now been fully ported into the Hugging Face transformers library. "
            "For the most up-to-date and high-performance version of the Falcon model code, please update to the latest version of transformers and then load the model "
            "without the trust_remote_code=True argument.\n"
        )
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
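A quick sanity check of the derived properties (a sketch; it assumes you run it from a clone of this repo so that `configuration_falcon` is importable):

```python
from configuration_falcon import FalconConfig

# Defaults match the config.json above: 4544 hidden dims across 71 heads.
cfg = FalconConfig()
assert cfg.head_dim == 4544 // 71  # 64
assert cfg.rotary                  # rotary embeddings are used whenever alibi=False
assert cfg.num_kv_heads == cfg.num_attention_heads  # multi-query bookkeeping default
```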
generation_config.json (new file, 6 lines)
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 11,
  "eos_token_id": 11,
  "transformers_version": "4.30.0"
}
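These defaults can be inspected directly (a minimal sketch, assuming transformers is installed and the repo is reachable on the Hub):

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("ClaudiaIoana550/try2_deploy_falcon")
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id)  # 11 11: Falcon uses token 11 for both
```

Note that handler.py below overrides several generation fields (max_new_tokens, top_p, repetition_penalty, and so on) before building its pipeline.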
handler.py (new file, 105 lines)
@@ -0,0 +1,105 @@
from typing import Any, Dict, List

import torch
import transformers
from langchain.llms import HuggingFacePipeline
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    StoppingCriteria,
    StoppingCriteriaList,
)

# Use bfloat16 on compute-capability-8.x (Ampere-class) GPUs, float16 otherwise.
dtype = (
    torch.bfloat16
    if torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8
    else torch.float16
)


class StopGenerationCriteria(StoppingCriteria):
    def __init__(self, max_duplicate_sequences=3, max_repeated_words=2):
        self.generated_sequences = set()
        self.max_duplicate_sequences = max_duplicate_sequences
        self.max_repeated_words = max_repeated_words
        self.tokenizer = None  # loaded lazily on the first call, then cached

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # Load the tokenizer once instead of on every generation step.
        if self.tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                "ClaudiaIoana550/try2_deploy_falcon", trust_remote_code=True
            )
        generated_sequence = input_ids.tolist()

        # Stop if the last 30 tokens already occur at least twice in the output.
        if len(generated_sequence[0]) >= 50:
            sequen = generated_sequence[0][-30:]
            count2 = sum(
                1
                for i in range(len(generated_sequence[0]) - len(sequen) + 1)
                if generated_sequence[0][i : i + len(sequen)] == sequen
            )
            if count2 >= 2:
                return True

        # Stop if the same token repeats more than max_repeated_words times in a row.
        generated_tokens = [self.tokenizer.decode(token_id) for token_id in input_ids[0]]
        count = 1
        prev_token = None
        for token in generated_tokens:
            if token == prev_token:
                count += 1
                if count > self.max_repeated_words:
                    return True
            else:
                count = 1
            prev_token = token

        # Note: generated_sequences is never added to, so this check never fires as written.
        if len(self.generated_sequences) >= self.max_duplicate_sequences:
            return True

        return False


# Example usage:
# Define the maximum number of duplicate sequences and repeated words
max_duplicate_sequences = 1
max_repeated_words = 2

# Create an instance of StopGenerationCriteria
stop_criteria = StopGenerationCriteria(max_duplicate_sequences, max_repeated_words)

# Add the custom stopping criteria to a StoppingCriteriaList
stopping_criteria = StoppingCriteriaList([stop_criteria])


class EndpointHandler:
    def __init__(self, model_path=""):
        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        model = AutoModelForCausalLM.from_pretrained(
            model_path,
            return_dict=True,
            device_map="auto",
            torch_dtype=dtype,
            trust_remote_code=True,
        )

        generation_config = model.generation_config
        generation_config.max_new_tokens = 1700
        generation_config.min_length = 20
        generation_config.temperature = 1
        generation_config.top_p = 0.7
        generation_config.num_return_sequences = 1
        generation_config.pad_token_id = tokenizer.eos_token_id
        generation_config.eos_token_id = tokenizer.eos_token_id
        generation_config.repetition_penalty = 1.1

        gpipeline = transformers.pipeline(
            model=model,
            tokenizer=tokenizer,
            return_full_text=True,
            task="text-generation",
            stopping_criteria=stopping_criteria,
            generation_config=generation_config,
        )

        self.llm = HuggingFacePipeline(pipeline=gpipeline)

    def __call__(self, data: Dict[str, Any]) -> Any:
        prompt = data.pop("inputs", data)
        result = self.llm(prompt)
        return result
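A local smoke test of the handler could look like this (a sketch; it assumes a GPU machine with the pinned requirements installed, and it downloads roughly 14 GB of weights on first run):

```python
from handler import EndpointHandler

# Point the handler at the Hub repo (or at a local clone of it).
handler = EndpointHandler("ClaudiaIoana550/try2_deploy_falcon")
output = handler({"inputs": "Write a short poem about the sea."})
print(output)
```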
modeling_falcon.py (new file, 1262 lines)
File diff suppressed because it is too large
pytorch_model-00001-of-00002.bin (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:79e273e3dfebd4396ba8e43d2acf885f25ad8cff5aa8028319fd3b12831c749a
size 9951009355
pytorch_model-00002-of-00002.bin (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c9909f74d578b7692133c83ac547636a1aa742c3938142b34ffa521ae7e7f58f
size 3892502005
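Only these LFS pointer files live in git; the actual shards are fetched separately. A downloaded shard can be checked against its pointer like this (a sketch, assuming the shard sits in the current directory):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so the ~10 GB shard never has to fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected digest copied from the first LFS pointer above.
expected = "79e273e3dfebd4396ba8e43d2acf885f25ad8cff5aa8028319fd3b12831c749a"
assert sha256_of("pytorch_model-00001-of-00002.bin") == expected
```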
pytorch_model.bin.index.json (new file, 203 lines)
@@ -0,0 +1,203 @@
{
  "metadata": {
    "total_size": 13843441408
  },
  "weight_map": {
    "lm_head.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.0.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.0.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.0.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.0.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.0.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.0.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.1.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.1.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.1.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.1.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.1.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.1.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.10.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.10.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.10.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.10.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.10.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.10.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.11.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.11.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.11.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.11.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.11.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.11.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.12.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.12.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.12.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.12.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.12.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.12.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.13.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.13.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.13.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.13.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.13.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.13.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.14.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.14.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.14.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.14.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.14.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.14.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.15.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.15.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.15.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.15.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.15.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.15.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.16.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.16.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.16.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.16.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.16.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.16.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.17.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.17.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.17.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.17.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.17.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.17.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.18.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.18.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.18.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.18.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.18.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.18.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.19.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.19.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.19.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.19.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.19.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.19.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.2.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.2.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.2.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.2.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.2.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.2.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.20.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.20.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.20.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.20.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.20.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.20.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.21.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.21.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.21.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.21.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.21.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.21.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.22.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.h.22.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.22.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.22.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.22.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.22.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.23.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.h.23.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.23.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.23.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.23.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.23.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.24.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.h.24.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.24.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.24.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.24.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.24.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.25.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.h.25.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.25.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.25.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.25.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.25.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.26.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.h.26.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.26.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.26.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.26.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.26.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.27.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.h.27.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.27.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.27.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.27.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.27.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.28.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.h.28.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.28.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.28.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.28.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.28.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.29.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.h.29.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.29.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.29.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.29.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.29.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.3.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.3.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.3.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.3.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.3.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.3.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.30.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.h.30.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.30.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.30.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.30.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.30.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.31.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.h.31.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.31.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.31.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.31.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.31.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.h.4.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.4.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.4.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.4.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.4.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.4.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.5.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.5.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.5.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.5.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.5.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.5.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.6.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.6.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.6.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.6.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.6.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.6.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.7.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.7.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.7.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.7.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.7.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.7.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.8.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.8.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.8.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.8.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.8.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.8.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.9.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.h.9.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.9.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.9.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.9.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.h.9.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.ln_f.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.ln_f.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.word_embeddings.weight": "pytorch_model-00001-of-00002.bin"
  }
}
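The index maps each parameter to the shard that holds it; transformers reads it automatically when loading, but it is also easy to inspect by hand (a sketch over a local clone of the repo):

```python
import json
from collections import Counter

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

# 196 tensors split across the two shards; note that layer 22 straddles the boundary.
shard_counts = Counter(index["weight_map"].values())
print(shard_counts)
print(index["metadata"]["total_size"])  # 13843441408 bytes of parameters, ~13.8 GB
```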
requirements.txt (new file, 7 lines)
@@ -0,0 +1,7 @@
torch==2.0.1
transformers==4.36.0
bitsandbytes==0.40.0
accelerate==0.21.0
loralib==0.1.1
einops==0.6.1
langchain==0.0.233
special_tokens_map.json (new file, 17 lines)
@@ -0,0 +1,17 @@
{
  "additional_special_tokens": [
    ">>TITLE<<",
    ">>ABSTRACT<<",
    ">>INTRODUCTION<<",
    ">>SUMMARY<<",
    ">>COMMENT<<",
    ">>ANSWER<<",
    ">>QUESTION<<",
    ">>DOMAIN<<",
    ">>PREFIX<<",
    ">>SUFFIX<<",
    ">>MIDDLE<<"
  ],
  "eos_token": "<|endoftext|>",
  "pad_token": "<|endoftext|>"
}
tokenizer.json (new file, 129971 lines)
File diff suppressed because it is too large
tokenizer_config.json (new file, 11 lines)
@@ -0,0 +1,11 @@
{
  "add_prefix_space": false,
  "clean_up_tokenization_spaces": true,
  "eos_token": "<|endoftext|>",
  "model_input_names": [
    "input_ids",
    "attention_mask"
  ],
  "model_max_length": 2048,
  "tokenizer_class": "PreTrainedTokenizerFast"
}
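The tokenizer files above can be sanity-checked together (a minimal sketch, assuming the repo files are available locally or on the Hub):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("ClaudiaIoana550/try2_deploy_falcon")
print(tok.model_max_length)  # 2048, from tokenizer_config.json
print(tok.eos_token)         # "<|endoftext|>", also used as the pad token
print(tok.convert_tokens_to_ids(tok.eos_token))  # 11, matching config.json
```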