Upload folder using huggingface_hub (#1)
- 3629499b7f0f5d9b7290ee866e480597430e728b5a87749e3f081628331fb108 (ef3eecf8ddfb0162c2a193dc2994c07dfd606c3e)
- 3778ffe34626936b804e0b940cb8fb912db8599efedfbb248f87cd6511e2731b (97a423976d2218d6fe6c5a7bd6009b48cccdfbe0)
- 52784d9778992d091d82d05e28031cb2405e52939b9ce4f9fac3d4c721f8ef32 (7dc0b68baaa8870615ec6ed99c53eef0fa481012)
- 664bb4b330c67987c5d81579c56c62758979c0d4ab99315f6e241d2c89ff0388 (6b2044e4809bd11cc1319dfc7f7d809395fe8913)
- e84d036c237c60c75bd8c1d5f1972443c9515980f827b396e18468b5eb1aaf3c (684ae7cab7c31853a134b0e25780a1fe52853325)
- f1945ae592e762ecefdd284d8970a510817a9fc5d444ba694757faa3f220e791 (a4a4294e8f50f8f32f2ad62181bce43f7b616dbc)

.gitattributes (vendored) · 38
@@ -1,47 +1,41 @@
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
-*.bin.* filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
 *.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
-*.zstandard filter=lfs diff=lfs merge=lfs -text
-*.tfevents* filter=lfs diff=lfs merge=lfs -text
-*.db* filter=lfs diff=lfs merge=lfs -text
-*.ark* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.gguf* filter=lfs diff=lfs merge=lfs -text
-*.ggml filter=lfs diff=lfs merge=lfs -text
-*.llamafile* filter=lfs diff=lfs merge=lfs -text
-*.pt2 filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+model-00001-of-00006.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00002-of-00006.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00003-of-00006.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00004-of-00006.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00005-of-00006.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00006-of-00006.safetensors filter=lfs diff=lfs merge=lfs -text

README.md · 70
@@ -1,47 +1,35 @@
----
-license: Apache License 2.0
-#model-type:
-##e.g. gpt, phi, llama, chatglm, baichuan, etc.
-#- gpt
-#domain:
-##e.g. nlp, cv, audio, multi-modal
-#- nlp
-#language:
-##list of language codes: https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
-#- cn
-#metrics:
-##e.g. CIDEr, BLEU, ROUGE, etc.
-#- CIDEr
-#tags:
-##any custom tags, including training methods such as pretrained, fine-tuned, instruction-tuned, RL-tuned, and others
-#- pretrained
-#tools:
-##e.g. vllm, fastchat, llamacpp, AdaSeq, etc.
-#- vllm
----
-
-### The contributors of this model have not provided a more detailed model description. The model files and weights can be browsed on the "Model files" page.
-
-#### You can download the model with the git clone command below, or via the ModelScope SDK
-
-SDK download
-```bash
-#install ModelScope
-pip install modelscope
-```
-```python
-#SDK model download
-from modelscope import snapshot_download
-model_dir = snapshot_download('mlx-community/Phi-3-medium-128k-instruct-bf16')
-```
-Git download
-```
-#Git model download
-git clone https://www.modelscope.cn/mlx-community/Phi-3-medium-128k-instruct-bf16.git
-```
-
-<p style="color: lightgrey;">If you are a contributor to this model, we invite you to complete the model card according to the <a href="https://modelscope.cn/docs/ModelScope%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%85%A5%E6%B5%81%E7%A8%8B%E6%A6%82%E8%A7%88" style="color: lightgrey; text-decoration: underline;">model contribution documentation</a>.</p>
+---
+language:
+- multilingual
+license: mit
+license_link: https://huggingface.co/microsoft/Phi-3-medium-128k-instruct/resolve/main/LICENSE
+pipeline_tag: text-generation
+tags:
+- nlp
+- code
+- mlx
+inference:
+  parameters:
+    temperature: 0.7
+widget:
+- messages:
+  - role: user
+    content: Can you provide ways to eat combinations of bananas and dragonfruits?
+---
+
+# mlx-community/Phi-3-medium-128k-instruct-bf16
+
+The Model [mlx-community/Phi-3-medium-128k-instruct-bf16](https://huggingface.co/mlx-community/Phi-3-medium-128k-instruct-bf16) was converted to MLX format from [microsoft/Phi-3-medium-128k-instruct](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct) using mlx-lm version **0.18.1**.
+
+## Use with mlx
+
+```bash
+pip install mlx-lm
+```
+
+```python
+from mlx_lm import load, generate
+
+model, tokenizer = load("mlx-community/Phi-3-medium-128k-instruct-bf16")
+response = generate(model, tokenizer, prompt="hello", verbose=True)
+```
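
Note on the README example: Phi-3-instruct checkpoints are trained on the `<|user|>` / `<|assistant|>` chat format defined in `tokenizer_config.json` (added later in this commit), so prompts generally behave better when passed through the chat template rather than sent as raw text. A minimal sketch using the same `mlx_lm` API as the README; the question string is illustrative only:

```python
from mlx_lm import load, generate

model, tokenizer = load("mlx-community/Phi-3-medium-128k-instruct-bf16")

# Wrap the user message in the model's chat format instead of sending raw text.
messages = [{"role": "user", "content": "Give me three ways to combine bananas and dragonfruit."}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

response = generate(model, tokenizer, prompt=prompt, verbose=True)
```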

added_tokens.json · 13 (Normal file)
@@ -0,0 +1,13 @@
{
  "<|assistant|>": 32001,
  "<|endoftext|>": 32000,
  "<|end|>": 32007,
  "<|placeholder1|>": 32002,
  "<|placeholder2|>": 32003,
  "<|placeholder3|>": 32004,
  "<|placeholder4|>": 32005,
  "<|placeholder5|>": 32008,
  "<|placeholder6|>": 32009,
  "<|system|>": 32006,
  "<|user|>": 32010
}

config.json · 169 (Normal file)
@@ -0,0 +1,169 @@
{
  "architectures": [
    "Phi3ForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_phi3.Phi3Config",
    "AutoModelForCausalLM": "modeling_phi3.Phi3ForCausalLM"
  },
  "bos_token_id": 1,
  "embd_pdrop": 0.0,
  "eos_token_id": 32000,
  "hidden_act": "silu",
  "hidden_size": 5120,
  "initializer_range": 0.02,
  "intermediate_size": 17920,
  "max_position_embeddings": 131072,
  "model_type": "phi3",
  "num_attention_heads": 40,
  "num_hidden_layers": 40,
  "num_key_value_heads": 10,
  "original_max_position_embeddings": 4096,
  "pad_token_id": null,
  "resid_pdrop": 0.0,
  "rms_norm_eps": 1e-05,
  "rope_scaling": {
    "long_factor": [
      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
      1.25, 1.25, 1.5, 2.0, 2.75, 5.75, 5.75, 6.5, 9.25, 11.0, 13.25,
      19.25, 19.75, 19.75, 21.25, 21.5, 26.5, 30.0, 33.75, 35.25, 38.5,
      42.0, 42.25, 46.0, 47.0, 50.0, 50.5, 51.0, 52.0, 52.75, 53.75,
      54.75, 57.0, 57.25, 58.5, 59.25, 59.5, 62.0, 62.5, 62.75, 63.25,
      63.25, 63.25, 63.75, 64.0, 64.0, 64.25, 64.5, 64.5, 65.0, 65.0
    ],
    "short_factor": [
      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.01, 1.02, 1.02, 1.04, 1.04, 1.07,
      1.07, 1.1, 1.3000000000000003, 1.3000000000000003, 1.5000000000000004,
      1.5700000000000005, 1.9000000000000008, 2.3100000000000014,
      2.759999999999992, 3.3899999999999784, 3.9399999999999666,
      4.009999999999965, 4.289999999999959, 4.349999999999958,
      5.349999999999937, 6.659999999999909, 7.029999999999901,
      7.51999999999989, 8.00999999999988, 8.249999999999876,
      8.279999999999875, 9.629999999999846, 9.89999999999984,
      10.589999999999826, 11.049999999999816, 11.7899999999998,
      12.189999999999792, 12.889999999999777, 13.129999999999772,
      13.16999999999977, 13.20999999999977, 13.479999999999764,
      13.539999999999763, 13.779999999999758, 13.929999999999755,
      14.429999999999744, 14.759999999999737, 15.149999999999729,
      15.419999999999723, 15.53999999999972, 15.659999999999718,
      15.749999999999716, 15.759999999999716, 15.799999999999715,
      16.05999999999971, 16.079999999999714, 16.11999999999972,
      16.11999999999972, 16.18999999999973, 16.31999999999975,
      16.539999999999786, 16.799999999999827
    ],
    "type": "su"
  },
  "rope_theta": 10000.0,
  "sliding_window": 131072,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.39.3",
  "use_cache": true,
  "vocab_size": 32064
}
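
A few derived quantities are worth spelling out: with hidden_size 5120 and 40 attention heads the head dimension is 128, the 10 key/value heads give grouped-query attention with 4 query heads per KV head, and each rope_scaling factor list must have head_dim / 2 = 64 entries (the rule enforced by configuration_phi3.py below). A small sketch of that arithmetic, assuming the config.json above has been saved to the working directory:

```python
import json

with open("config.json") as f:
    cfg = json.load(f)

head_dim = cfg["hidden_size"] // cfg["num_attention_heads"]            # 5120 / 40 = 128
gqa_groups = cfg["num_attention_heads"] // cfg["num_key_value_heads"]  # 40 / 10 = 4 query heads per KV head
rope_dims = head_dim // 2                                              # 64 rotary dimensions

assert len(cfg["rope_scaling"]["long_factor"]) == rope_dims == 64
assert len(cfg["rope_scaling"]["short_factor"]) == rope_dims == 64
print(head_dim, gqa_groups, rope_dims)
```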

configuration.json · 1 (Normal file)
@@ -0,0 +1 @@
{"framework": "pytorch", "task": "text-generation", "allow_remote": true}

configuration_phi3.py · 213 (Normal file)
@@ -0,0 +1,213 @@
# coding=utf-8
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Phi-3 model configuration"""


from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)

PHI3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/Phi-3-mini-4k-instruct": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/config.json",
    "microsoft/Phi-3-mini-128k-instruct": "https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/config.json",
}


class Phi3Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the
    [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32064):
            Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Phi3Model`].
        hidden_size (`int`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 8192):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        resid_pdrop (`float`, *optional*, defaults to 0.0):
            Dropout probability for mlp outputs.
        embd_pdrop (`int`, *optional*, defaults to 0.0):
            The dropout ratio for the embeddings.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio after computing the attention scores.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with.
        original_max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model was trained with. This is used to determine the size of the
            original RoPE embeddings when using long scaling.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value used for the RMSNorm.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`dict`, *optional*):
            The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
            contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be either `su` or `yarn` and
            the `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden size
            divided by the number of attention heads divided by 2.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 32000):
            The id of the "end-of-sequence" token.
        pad_token_id (`int`, *optional*, defaults to 32000):
            The id of the padding token.
        sliding_window (`int`, *optional*):
            Sliding window attention window size. If `None`, no sliding window is applied.

    Example:

    ```python
    >>> from transformers import Phi3Model, Phi3Config

    >>> # Initializing a Phi-3 style configuration
    >>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct")

    >>> # Initializing a model from the configuration
    >>> model = Phi3Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "phi3"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32064,
        hidden_size=3072,
        intermediate_size=8192,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attention_dropout=0.0,
        hidden_act="silu",
        max_position_embeddings=4096,
        original_max_position_embeddings=4096,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        bos_token_id=1,
        eos_token_id=32000,
        pad_token_id=32000,
        sliding_window=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attention_dropout = attention_dropout
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.original_max_position_embeddings = original_max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        self.sliding_window = sliding_window

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pad_token_id=pad_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3:
            raise ValueError(
                "`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_short_factor = self.rope_scaling.get("short_factor", None)
        rope_scaling_long_factor = self.rope_scaling.get("long_factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["su", "yarn"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['su', 'yarn'], got {rope_scaling_type}")
        if not (
            isinstance(rope_scaling_short_factor, list)
            and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor)
        ):
            raise ValueError(
                f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}"
            )
        if not len(rope_scaling_short_factor) == self.hidden_size // self.num_attention_heads // 2:
            raise ValueError(
                f"`rope_scaling`'s short_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_short_factor)}"
            )
        if not (
            isinstance(rope_scaling_long_factor, list)
            and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor)
        ):
            raise ValueError(
                f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}"
            )
        if not len(rope_scaling_long_factor) == self.hidden_size // self.num_attention_heads // 2:
            raise ValueError(
                f"`rope_scaling`'s long_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}"
            )
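
The docstring's usage example requires downloading a checkpoint, but the rope_scaling validation above can be exercised standalone. A minimal sketch (assuming this file is importable as `configuration_phi3` from the working directory and transformers is installed) showing that a mis-sized factor list is rejected:

```python
from configuration_phi3 import Phi3Config

# 5120 // 40 // 2 = 64 factors are required; a length of 2 is deliberately wrong.
bad_scaling = {"type": "su", "short_factor": [1.0, 1.0], "long_factor": [1.0, 1.0]}

try:
    Phi3Config(hidden_size=5120, num_attention_heads=40, rope_scaling=bad_scaling)
except ValueError as err:
    print(err)  # `rope_scaling`'s short_factor field must have length 64, got 2
```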

model-00001-of-00006.safetensors · 3 (Normal file)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3f4aa89b8f49edf759086f3ba03ceb7d13c8f952c037eabe227abe6cbb34be6d
size 5230576771

model-00002-of-00006.safetensors · 3 (Normal file)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8c09069ab0098038d30331c205283accde26d34bf6c8e189540bead8770e7de3
size 5321692436

model-00003-of-00006.safetensors · 3 (Normal file)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2c390cf11da5855eb576771577f82b7da818595a1de9503c709ad22fbf2ab286
size 5269243073

model-00004-of-00006.safetensors · 3 (Normal file)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:622c046134c41dff94e3a6434df60ec195bd4a93b2987a22651937c8fc7c57ba
size 5085762981

model-00005-of-00006.safetensors · 3 (Normal file)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2d3d55fa6f139fbe7375dcc041276b087652d12030ba263edda3af3fc7e5743f
size 5321692458

model-00006-of-00006.safetensors · 3 (Normal file)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7bca60d2cda820e99f9cc7c2273da45cae1faf94eaa4de1af6dc2bafe999338c
size 1691536941

model.safetensors.index.json · 250 (Normal file)
@@ -0,0 +1,250 @@
{
  "metadata": {
    "total_size": 27920476160
  },
  "weight_map": {
    "lm_head.weight": "model-00006-of-00006.safetensors",
    "model.embed_tokens.weight": "model-00001-of-00006.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00006.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.0.mlp.gate_up_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.0.self_attn.qkv_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00006.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.1.mlp.gate_up_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.1.self_attn.qkv_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00002-of-00006.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.10.mlp.gate_up_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.10.self_attn.qkv_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00002-of-00006.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.11.mlp.gate_up_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.11.self_attn.qkv_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00002-of-00006.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.12.mlp.gate_up_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.12.self_attn.qkv_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00002-of-00006.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.13.mlp.gate_up_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.13.self_attn.qkv_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00002-of-00006.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.14.mlp.gate_up_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.14.self_attn.qkv_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00003-of-00006.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.15.mlp.gate_up_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.15.self_attn.qkv_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00003-of-00006.safetensors",
    "model.layers.16.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.16.mlp.gate_up_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
    "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.16.self_attn.qkv_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.17.input_layernorm.weight": "model-00003-of-00006.safetensors",
    "model.layers.17.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.17.mlp.gate_up_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
    "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.17.self_attn.qkv_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.18.input_layernorm.weight": "model-00003-of-00006.safetensors",
    "model.layers.18.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.18.mlp.gate_up_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
    "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.18.self_attn.qkv_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.19.input_layernorm.weight": "model-00003-of-00006.safetensors",
    "model.layers.19.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.19.mlp.gate_up_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
    "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.19.self_attn.qkv_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.2.input_layernorm.weight": "model-00001-of-00006.safetensors",
    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.2.mlp.gate_up_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.2.self_attn.qkv_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.20.input_layernorm.weight": "model-00003-of-00006.safetensors",
    "model.layers.20.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.20.mlp.gate_up_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
    "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.20.self_attn.qkv_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.21.input_layernorm.weight": "model-00003-of-00006.safetensors",
    "model.layers.21.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.21.mlp.gate_up_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
    "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.21.self_attn.qkv_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.22.input_layernorm.weight": "model-00004-of-00006.safetensors",
    "model.layers.22.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.22.mlp.gate_up_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.22.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
    "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.22.self_attn.qkv_proj.weight": "model-00003-of-00006.safetensors",
    "model.layers.23.input_layernorm.weight": "model-00004-of-00006.safetensors",
    "model.layers.23.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.23.mlp.gate_up_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.23.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
    "model.layers.23.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.23.self_attn.qkv_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.24.input_layernorm.weight": "model-00004-of-00006.safetensors",
    "model.layers.24.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.24.mlp.gate_up_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.24.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
    "model.layers.24.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.24.self_attn.qkv_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.25.input_layernorm.weight": "model-00004-of-00006.safetensors",
    "model.layers.25.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.25.mlp.gate_up_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.25.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
    "model.layers.25.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.25.self_attn.qkv_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.26.input_layernorm.weight": "model-00004-of-00006.safetensors",
    "model.layers.26.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.26.mlp.gate_up_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
    "model.layers.26.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.26.self_attn.qkv_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.27.input_layernorm.weight": "model-00004-of-00006.safetensors",
    "model.layers.27.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.27.mlp.gate_up_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
    "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.27.self_attn.qkv_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.28.input_layernorm.weight": "model-00004-of-00006.safetensors",
    "model.layers.28.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.28.mlp.gate_up_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.28.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
    "model.layers.28.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.28.self_attn.qkv_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.29.input_layernorm.weight": "model-00004-of-00006.safetensors",
    "model.layers.29.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.29.mlp.gate_up_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.29.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
    "model.layers.29.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.29.self_attn.qkv_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.3.input_layernorm.weight": "model-00001-of-00006.safetensors",
    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.3.mlp.gate_up_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.3.self_attn.qkv_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.30.input_layernorm.weight": "model-00005-of-00006.safetensors",
    "model.layers.30.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.30.mlp.gate_up_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.30.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
    "model.layers.30.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.30.self_attn.qkv_proj.weight": "model-00004-of-00006.safetensors",
    "model.layers.31.input_layernorm.weight": "model-00005-of-00006.safetensors",
    "model.layers.31.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.31.mlp.gate_up_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.31.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
    "model.layers.31.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.31.self_attn.qkv_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.32.input_layernorm.weight": "model-00005-of-00006.safetensors",
    "model.layers.32.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.32.mlp.gate_up_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.32.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
    "model.layers.32.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.32.self_attn.qkv_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.33.input_layernorm.weight": "model-00005-of-00006.safetensors",
    "model.layers.33.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.33.mlp.gate_up_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.33.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
    "model.layers.33.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.33.self_attn.qkv_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.34.input_layernorm.weight": "model-00005-of-00006.safetensors",
    "model.layers.34.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.34.mlp.gate_up_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.34.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
    "model.layers.34.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.34.self_attn.qkv_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.35.input_layernorm.weight": "model-00005-of-00006.safetensors",
    "model.layers.35.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.35.mlp.gate_up_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.35.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
    "model.layers.35.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.35.self_attn.qkv_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.36.input_layernorm.weight": "model-00005-of-00006.safetensors",
    "model.layers.36.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.36.mlp.gate_up_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.36.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
    "model.layers.36.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.36.self_attn.qkv_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.37.input_layernorm.weight": "model-00005-of-00006.safetensors",
    "model.layers.37.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.37.mlp.gate_up_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.37.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
    "model.layers.37.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.37.self_attn.qkv_proj.weight": "model-00005-of-00006.safetensors",
    "model.layers.38.input_layernorm.weight": "model-00006-of-00006.safetensors",
    "model.layers.38.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
    "model.layers.38.mlp.gate_up_proj.weight": "model-00006-of-00006.safetensors",
    "model.layers.38.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
    "model.layers.38.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
    "model.layers.38.self_attn.qkv_proj.weight": "model-00006-of-00006.safetensors",
    "model.layers.39.input_layernorm.weight": "model-00006-of-00006.safetensors",
    "model.layers.39.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
    "model.layers.39.mlp.gate_up_proj.weight": "model-00006-of-00006.safetensors",
    "model.layers.39.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
    "model.layers.39.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
    "model.layers.39.self_attn.qkv_proj.weight": "model-00006-of-00006.safetensors",
    "model.layers.4.input_layernorm.weight": "model-00001-of-00006.safetensors",
    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.4.mlp.gate_up_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.4.self_attn.qkv_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.5.input_layernorm.weight": "model-00001-of-00006.safetensors",
    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.5.mlp.gate_up_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.5.self_attn.qkv_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.6.input_layernorm.weight": "model-00001-of-00006.safetensors",
    "model.layers.6.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.6.mlp.gate_up_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.6.self_attn.qkv_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.7.input_layernorm.weight": "model-00002-of-00006.safetensors",
    "model.layers.7.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.7.mlp.gate_up_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
    "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.7.self_attn.qkv_proj.weight": "model-00001-of-00006.safetensors",
    "model.layers.8.input_layernorm.weight": "model-00002-of-00006.safetensors",
    "model.layers.8.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.8.mlp.gate_up_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
    "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.8.self_attn.qkv_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.9.input_layernorm.weight": "model-00002-of-00006.safetensors",
    "model.layers.9.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.9.mlp.gate_up_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
    "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
    "model.layers.9.self_attn.qkv_proj.weight": "model-00002-of-00006.safetensors",
    "model.norm.weight": "model-00006-of-00006.safetensors"
  }
}
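
The index's total_size counts tensor bytes across all six shards; with bfloat16 weights (2 bytes per parameter, per config.json's torch_dtype) that works out to 27,920,476,160 / 2 ≈ 13.96 B parameters, consistent with Phi-3-medium's 14B size. A small sketch of that check, assuming the index file is available locally (the shard files themselves are not needed):

```python
import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

total_bytes = index["metadata"]["total_size"]   # 27,920,476,160
params = total_bytes // 2                       # bfloat16 = 2 bytes per parameter
print(f"{params / 1e9:.2f}B parameters")        # ~13.96B
print(len(index["weight_map"]), "tensors across",
      len(set(index["weight_map"].values())), "shards")  # 243 tensors, 6 shards
```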

modeling_phi3.py · 1606 (Normal file)
File diff suppressed because it is too large

sample_finetune.py · 214 (Normal file)
@@ -0,0 +1,214 @@
import sys
import logging

import datasets
from datasets import load_dataset
from peft import LoraConfig
import torch
import transformers
from trl import SFTTrainer
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig

"""
A simple example on using SFTTrainer and Accelerate to finetune Phi-3 models. For
a more advanced example, please follow HF alignment-handbook/scripts/run_sft.py.
This example has utilized DeepSpeed ZeRO3 offload to reduce the memory usage. The
script can be run on V100 or later generation GPUs. Here are some suggestions on
further reducing memory consumption:
    - reduce batch size
    - decrease lora dimension
    - restrict lora target modules
Please follow these steps to run the script:
1. Install dependencies:
    conda install -c conda-forge accelerate
    pip3 install -i https://pypi.org/simple/ bitsandbytes
    pip3 install peft transformers trl datasets
    pip3 install deepspeed
2. Setup accelerate and deepspeed config based on the machine used:
    accelerate config
    Here is a sample config for deepspeed zero3:
    compute_environment: LOCAL_MACHINE
    debug: false
    deepspeed_config:
      gradient_accumulation_steps: 1
      offload_optimizer_device: none
      offload_param_device: none
      zero3_init_flag: true
      zero3_save_16bit_model: true
      zero_stage: 3
    distributed_type: DEEPSPEED
    downcast_bf16: 'no'
    enable_cpu_affinity: false
    machine_rank: 0
    main_training_function: main
    mixed_precision: bf16
    num_machines: 1
    num_processes: 4
    rdzv_backend: static
    same_network: true
    tpu_env: []
    tpu_use_cluster: false
    tpu_use_sudo: false
    use_cpu: false
3. check accelerate config:
    accelerate env
4. Run the code:
    accelerate launch sample_finetune.py
"""

logger = logging.getLogger(__name__)


###################
# Hyper-parameters
###################
training_config = {
    "bf16": True,
    "do_eval": False,
    "learning_rate": 5.0e-06,
    "log_level": "info",
    "logging_steps": 20,
    "logging_strategy": "steps",
    "lr_scheduler_type": "cosine",
    "num_train_epochs": 1,
    "max_steps": -1,
    "output_dir": "./checkpoint_dir",
    "overwrite_output_dir": True,
    "per_device_eval_batch_size": 4,
    "per_device_train_batch_size": 4,
    "remove_unused_columns": True,
    "save_steps": 100,
    "save_total_limit": 1,
    "seed": 0,
    "gradient_checkpointing": True,
    "gradient_checkpointing_kwargs": {"use_reentrant": False},
    "gradient_accumulation_steps": 1,
    "warmup_ratio": 0.2,
}

peft_config = {
    "r": 16,
    "lora_alpha": 32,
    "lora_dropout": 0.05,
    "bias": "none",
    "task_type": "CAUSAL_LM",
    "target_modules": "all-linear",
    "modules_to_save": None,
}
train_conf = TrainingArguments(**training_config)
peft_conf = LoraConfig(**peft_config)


###############
# Setup logging
###############
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = train_conf.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()

# Log on each process a small summary
logger.warning(
    f"Process rank: {train_conf.local_rank}, device: {train_conf.device}, n_gpu: {train_conf.n_gpu}"
    + f" distributed training: {bool(train_conf.local_rank != -1)}, 16-bits training: {train_conf.fp16}"
)
logger.info(f"Training/evaluation parameters {train_conf}")
logger.info(f"PEFT parameters {peft_conf}")


################
# Model Loading
################
checkpoint_path = "microsoft/Phi-3-medium-4k-instruct"
# checkpoint_path = "microsoft/Phi-3-medium-128k-instruct"
model_kwargs = dict(
    use_cache=False,
    trust_remote_code=True,
    attn_implementation="flash_attention_2",  # loading the model with flash-attention support
    torch_dtype=torch.bfloat16,
    device_map=None
)
model = AutoModelForCausalLM.from_pretrained(checkpoint_path, **model_kwargs)
tokenizer = AutoTokenizer.from_pretrained(checkpoint_path)
tokenizer.model_max_length = 2048
tokenizer.pad_token = tokenizer.unk_token  # use unk rather than eos token to prevent endless generation
tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
tokenizer.padding_side = 'right'


##################
# Data Processing
##################
def apply_chat_template(
    example,
    tokenizer,
):
    messages = example["messages"]
    example["text"] = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=False)
    return example

raw_dataset = load_dataset("HuggingFaceH4/ultrachat_200k")
train_dataset = raw_dataset["train_sft"]
test_dataset = raw_dataset["test_sft"]
column_names = list(train_dataset.features)

processed_train_dataset = train_dataset.map(
    apply_chat_template,
    fn_kwargs={"tokenizer": tokenizer},
    num_proc=10,
    remove_columns=column_names,
    desc="Applying chat template to train_sft",
)

processed_test_dataset = test_dataset.map(
    apply_chat_template,
    fn_kwargs={"tokenizer": tokenizer},
    num_proc=10,
    remove_columns=column_names,
    desc="Applying chat template to test_sft",
)


###########
# Training
###########
trainer = SFTTrainer(
    model=model,
    args=train_conf,
    peft_config=peft_conf,
    train_dataset=processed_train_dataset,
    eval_dataset=processed_test_dataset,
    max_seq_length=2048,
    dataset_text_field="text",
    tokenizer=tokenizer,
    packing=True
)
train_result = trainer.train()
metrics = train_result.metrics
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()


#############
# Evaluation
#############
tokenizer.padding_side = 'left'
metrics = trainer.evaluate()
metrics["eval_samples"] = len(processed_test_dataset)
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)


# ############
# # Save model
# ############
trainer.save_model(train_conf.output_dir)
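
Note that the script saves a LoRA adapter (not merged full weights) to ./checkpoint_dir. One way to reload it for inference is peft's AutoPeftModelForCausalLM; a hedged sketch, assuming peft is installed and the base checkpoint named in the adapter config is reachable:

```python
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Loads the base model referenced by the adapter config, then applies the LoRA weights on top.
model = AutoPeftModelForCausalLM.from_pretrained(
    "./checkpoint_dir", torch_dtype=torch.bfloat16, trust_remote_code=True
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-medium-4k-instruct")
```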

special_tokens_map.json · 30 (Normal file)
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}

tokenizer.json · 93463 (Normal file)
File diff suppressed because it is too large

tokenizer.model · 3 (Normal file)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
size 499723

tokenizer_config.json · 131 (Normal file)
@@ -0,0 +1,131 @@
{
  "add_bos_token": false,
  "add_eos_token": false,
  "add_prefix_space": null,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "32000": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "32001": {
      "content": "<|assistant|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32002": {
      "content": "<|placeholder1|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32003": {
      "content": "<|placeholder2|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32004": {
      "content": "<|placeholder3|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32005": {
      "content": "<|placeholder4|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32006": {
      "content": "<|system|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32007": {
      "content": "<|end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32008": {
      "content": "<|placeholder5|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32009": {
      "content": "<|placeholder6|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32010": {
      "content": "<|user|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "chat_template": "{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|endoftext|>",
  "legacy": false,
  "model_max_length": 131072,
  "pad_token": "<|endoftext|>",
  "padding_side": "left",
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}
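
The chat_template above only defines user and assistant turns (no system role): each user message is wrapped as <|user|> ... <|end|> followed by an <|assistant|> header, and assistant messages are closed with <|end|>. A small sketch of what the template renders, assuming transformers is installed and the tokenizer files from this commit are loadable via the Hub id used in the README:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("mlx-community/Phi-3-medium-128k-instruct-bf16")
messages = [{"role": "user", "content": "hello"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
print(prompt)
# <|user|>
# hello<|end|>
# <|assistant|>
```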