Initialize the project; model provided by the ModelHub XC community
Model: ericflo/Llama-3.1-8B-ContinuedTraining Source: Original Platform
.gitattributes (vendored, new file, 42 lines)
@@ -0,0 +1,42 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
Llama-3.1-8B-ContinuedTraining2.C150.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
Llama-3.1-8B-ContinuedTraining2.C250.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
Llama-3.1-8B-ContinuedTraining2.C350.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
Llama-3.1-8B-ContinuedTraining2.C450.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
Llama-3.1-8B-ContinuedTraining2.C550.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
Llama-3.1-8B-ContinuedTraining2.C650.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
Llama-3.1-8B-ContinuedTraining2.C650.bf16.gguf filter=lfs diff=lfs merge=lfs -text
Llama-3.1-8B-ContinuedTraining2.C150.Q8_0.gguf (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:476995bee6e3258e212f22f3c72f1c3bfc3251bf649f37b04c4667c4524a7d64
size 8540776096
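The file above is a Git LFS pointer, not the model weights themselves: it records the pointer-spec version, the SHA-256 of the real blob, and its size in bytes. A minimal Python sketch of parsing such a pointer and verifying a downloaded blob against it (this mimics, but is not, the git-lfs tool):

```python
import hashlib
from pathlib import Path

# Parse a Git LFS pointer (layout as shown above: version / oid / size).
def read_pointer(path: str) -> dict:
    fields = dict(line.split(" ", 1) for line in Path(path).read_text().splitlines())
    return {"oid": fields["oid"].removeprefix("sha256:"), "size": int(fields["size"])}

# Stream the downloaded blob through SHA-256 and compare against the pointer.
def verify(blob_path: str, pointer: dict) -> bool:
    h = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == pointer["oid"]

ptr = read_pointer("Llama-3.1-8B-ContinuedTraining2.C150.Q8_0.gguf")  # the pointer file
# verify("path/to/downloaded.gguf", ptr) -> True once the real blob is fetched
```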
Llama-3.1-8B-ContinuedTraining2.C250.Q8_0.gguf (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8654ecfd1ec88730ded3b96f5767509ccbc0d8a461e7876b720de609f6d7d1ab
size 8540776128
Llama-3.1-8B-ContinuedTraining2.C350.Q8_0.gguf (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2b2dfe9127fcc0d7388a364f5eba9143a8bb23b623a463916ec28c50db43026f
size 8540776128
Llama-3.1-8B-ContinuedTraining2.C450.Q8_0.gguf (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b9f0864fc2539f1f9a7dbedf2dd8e4ec86df3e405ef4dd685be2ce92027114d1
size 8540776128
Llama-3.1-8B-ContinuedTraining2.C550.Q8_0.gguf (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2f5e115b6c0789340061af22c8a0c50e60921998edd32404ac2bdc6277ebc7fe
size 8540776128
Llama-3.1-8B-ContinuedTraining2.C650.Q8_0.gguf (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0929d8756d0e81ff16e63b6cd05d4bf15975de49d57f1c1726c20998ae81afa9
size 8540776128
Llama-3.1-8B-ContinuedTraining2.C650.bf16.gguf (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7d05b24b3595df73de792ed99e68da59cc7b9f6a72a63de3dac04174c5a8aafc
size 16068896448
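The checkpoints above are quantized GGUF exports (Q8_0 at training steps 150 through 650, plus a bf16 export of step 650). Once the real blobs are fetched through LFS, they can be loaded with, for example, llama-cpp-python; a sketch under that assumption (the context size and prompt are illustrative choices):

```python
from llama_cpp import Llama  # assumes llama-cpp-python is installed

# Load the step-650 Q8_0 checkpoint; n_ctx=8192 is an illustrative choice.
llm = Llama(model_path="Llama-3.1-8B-ContinuedTraining2.C650.Q8_0.gguf", n_ctx=8192)
out = llm("def fibonacci(n):", max_tokens=128, temperature=0.6, top_p=0.9)
print(out["choices"][0]["text"])
```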
README.md (new file, 110 lines)
@@ -0,0 +1,110 @@
---
base_model: meta-llama/Meta-Llama-3.1-8B
library_name: peft
license: apache-2.0
datasets:
- mlabonne/FineTome-100k
- mlfoundations/dclm-baseline-1.0-parquet
- wikimedia/wikipedia
- bigcode/starcoderdata
language:
- en
pipeline_tag: text-generation
---

# Model Card: Custom LLM with High-Rank Adapter

## Model Overview

This model is a custom-trained language model based on the Meta-Llama-3.1-8B architecture. Unlike most instruction-tuned models, it was trained directly on a mixture of high-quality datasets covering general text completion, code completion, and instruction-following. A high-rank adapter (rank 128) increases learning capacity while mitigating catastrophic forgetting, which distinguishes this model from common low-rank fine-tuning setups.

- **Developer:** Eric Florenzano
- **Model Type:** Large Language Model (LLM)
- **Language(s):** English, with a focus on Python for code-related tasks
- **License:** Apache-2.0
- **Base Model:** meta-llama/Meta-Llama-3.1-8B

## Model Sources

- **Repository:** [Custom Llama-3.1-8B Training](https://huggingface.co/ericflo/Llama-3.1-8B-ContinuedTraining)

## Model Training and Approach

### Unique Training Approach

Instead of fine-tuning an instruction-tuned model, the base Meta-Llama-3.1-8B model was trained on a diverse set of high-quality pretraining and instruction datasets. Training covered both text completion/prediction and instruction-following tasks.

Key features of the training process:

- **Training Data**: A blend of high-quality data sources, each serving a different purpose:
  - **FineTome-100k**: High-quality instruction-tuned data for general language understanding and task completion.
  - **dclm-baseline-1.0-parquet**: A pretraining corpus from Apple, used for standard text completion/prediction tasks.
  - **English Wikipedia**: Used for text completion tasks with a focus on broad language understanding.
  - **Starcoder**: High-quality Python-focused code dataset used for code completion tasks.
- **Instruction-Tuning**: The model alternates randomly between ChatML and the Llama Chat template during training, so it learns a general-purpose instruction-following format rather than one tied to a single style.
- **Strata Information**: Training data is prefixed with contextual information (e.g., URLs for Wikipedia articles) to address data imbalance, allowing the model to weigh different data sources appropriately. This prefixing is used only during training; inference relies on the model's learned representations. A minimal sketch of the prefixing appears after this list.
- **High-Rank Adapter**: The model uses a high-rank adapter (rank 128) to learn more complex representations and reduce the risk of catastrophic forgetting, as opposed to the low ranks commonly used with LoRA.
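The full prefixing logic lives in `formatting_prompts_func` in sft.py, included later in this commit; here is a minimal sketch for two of the strata (field names follow the datasets used: wikimedia/wikipedia rows carry title/url/text, dclm rows carry language/url/text):

```python
# Minimal sketch of the strata prefixing described above; the complete
# version is formatting_prompts_func in sft.py.
def prefix_example(example: dict) -> str:
    if example.get("title") and example.get("url"):    # wikimedia/wikipedia
        return f'{example["title"]} {example["url"]} {example["text"]}'
    if example.get("language") and example.get("url"): # dclm-baseline
        return f'{example["language"]} {example["url"]} {example["text"]}'
    raise ValueError("unrecognized stratum")

row = {"title": "Python (programming language)",
       "url": "https://en.wikipedia.org/wiki/Python_(programming_language)",
       "text": "Python is a high-level, general-purpose programming language."}
print(prefix_example(row))  # the title and URL become the training-time prefix
```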
### Training Procedure

The model was trained for 650 steps on the datasets listed above, with attention to balancing the different task types (text completion, code completion, instruction-following). The high-rank adapter preserves model capacity while keeping the number of trainable parameters far below that of full fine-tuning.

#### Training Hyperparameters

- **Adapter Rank:** 128
- **Training Steps:** 650
- **Base Model:** meta-llama/Meta-Llama-3.1-8B
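For reference, a PEFT configuration matching these hyperparameters and the flags in sft.py (`--lora_r=128 --lora_alpha=256 --lora_dropout=0.05`, all attention and MLP projections targeted) might look like the sketch below; treat it as a reconstruction, not the exact training configuration:

```python
from peft import LoraConfig

# Sketch of an adapter config matching the sft.py flags; the exact
# training-time settings may differ from this reconstruction.
adapter_config = LoraConfig(
    r=128,              # high-rank adapter
    lora_alpha=256,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
)
```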
## Intended Uses

This model is designed for a variety of natural language processing tasks, including:

- **Text Completion and Generation**: Generating and predicting text from a given input.
- **Code Completion**: Assisting with Python code generation and completion tasks.
- **Instruction Following**: Following complex instructions across multiple domains.
- **General Language Understanding**: Leveraging its diverse training data for broad language comprehension tasks.
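A minimal text-generation sketch with transformers is shown below. The repository id is taken from the model card above, and the sampling settings mirror the generation_config.json included later in this commit (do_sample with temperature 0.6 and top_p 0.9); adjust both to your setup.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Repo id from the model card; sampling settings from generation_config.json.
repo = "ericflo/Llama-3.1-8B-ContinuedTraining"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(
    repo, torch_dtype=torch.bfloat16, device_map="auto"
)

inputs = tokenizer("def fibonacci(n):", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=128,
                     do_sample=True, temperature=0.6, top_p=0.9)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```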
### Out-of-Scope Use

- **Real-time Knowledge**: The model has no access to data or events after its training period.
- **Harmful or Biased Content**: It should not be used to generate harmful, biased, or misleading information.
- **Critical Decision-Making**: It should not be relied upon for critical tasks that require human oversight and judgment.

## Bias, Risks, and Limitations

While this model was trained on a mix of high-quality datasets, it may still exhibit biases present in the training data, especially in domains with limited or skewed representation. Users should:

- Be aware of potential biases, particularly in sensitive domains.
- Review model outputs for accuracy, especially for code generation and decision-making tasks.
- Use the model as a tool to assist human decision-making, not as a replacement.
- Understand that performance may vary across domains and task types.

## Evaluation

| Tasks            | Version | Filter           | n-shot | Metric      |   | Value  |   | Stderr |
|------------------|--------:|------------------|-------:|-------------|---|-------:|---|--------|
| tinyBenchmarks   |     N/A |                  |        |             |   |        |   |        |
| - tinyArc        |       0 | none             |     25 | acc_norm    | ↑ | 0.6056 | ± | N/A    |
| - tinyGSM8k      |       0 | flexible-extract |      5 | exact_match | ↑ | 0.4793 | ± | N/A    |
|                  |         | strict-match     |      5 | exact_match | ↑ | 0.4793 | ± | N/A    |
| - tinyHellaswag  |       0 | none             |     10 | acc_norm    | ↑ | 0.8261 | ± | N/A    |
| - tinyMMLU       |       0 | none             |      0 | acc_norm    | ↑ | 0.6358 | ± | N/A    |
| - tinyTruthfulQA |       0 | none             |      0 | acc         | ↑ | 0.5098 | ± | N/A    |
| - tinyWinogrande |       0 | none             |      5 | acc_norm    | ↑ | 0.7447 | ± | N/A    |

## Technical Specifications

### Model Architecture

- **Base Model**: meta-llama/Meta-Llama-3.1-8B
- **High-Rank Adapter**: A rank-128 adapter used to learn more complex patterns while reducing catastrophic forgetting.
- **Objective**: Multi-task learning across text completion, code completion, and instruction following.

### Compute Infrastructure

#### Software

- **Library**: PEFT 0.12.0 for parameter-efficient fine-tuning.

## Model Card Contact

For inquiries about this model, please contact Eric Florenzano through the [model repository](https://huggingface.co/ericflo/Llama-3.1-8B-ContinuedTraining).
config.json (new file, 35 lines)
@@ -0,0 +1,35 @@
{
  "_name_or_path": "meta-llama/Meta-Llama-3.1-8B",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 128000,
  "eos_token_id": 128001,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "max_position_embeddings": 131072,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": {
    "factor": 8.0,
    "high_freq_factor": 4.0,
    "low_freq_factor": 1.0,
    "original_max_position_embeddings": 8192,
    "rope_type": "llama3"
  },
  "rope_theta": 500000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float32",
  "transformers_version": "4.44.2",
  "use_cache": true,
  "vocab_size": 128256
}
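The config above encodes grouped-query attention: 32 query heads share 8 key/value heads (4 queries per KV head). A quick back-of-the-envelope sketch of the derived sizes, using the standard Llama conventions rather than values read from the checkpoint:

```python
# Derived sizes from the config above (standard Llama conventions; this is
# illustrative arithmetic, not data read from the checkpoint).
hidden_size = 4096
num_attention_heads = 32
num_key_value_heads = 8
num_hidden_layers = 32

head_dim = hidden_size // num_attention_heads            # 128
gqa_ratio = num_attention_heads // num_key_value_heads   # 4 query heads per KV head

# Per-token KV cache: 2 tensors (K and V) * layers * kv_heads * head_dim values.
kv_values_per_token = 2 * num_hidden_layers * num_key_value_heads * head_dim
print(kv_values_per_token)             # 65536 values per token
print(kv_values_per_token * 2 / 1024)  # 128.0 KiB per token at 2 bytes (bf16)
```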
generation_config.json (new file, 9 lines)
@@ -0,0 +1,9 @@
{
  "_from_model_config": true,
  "bos_token_id": 128000,
  "do_sample": true,
  "eos_token_id": 128001,
  "temperature": 0.6,
  "top_p": 0.9,
  "transformers_version": "4.44.2"
}
model-00001-of-00007.safetensors (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:43aa552d55c6728bcbcbfe12a9e1c408d551be98b328f79d2e72b53f44890114
size 4886466168
model-00002-of-00007.safetensors (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ff7fcd5a62c19878bd13e5e1342600fca1cba1227246049c2f8aeca5af5e78ff
size 4832007448
model-00003-of-00007.safetensors (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:db87b3e8d3bbdc015594e7948d4bf668c0d491b3fcc51fe3439758fd81e58906
size 4999813112
model-00004-of-00007.safetensors (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:55ba5444fb2c54b9cc084187f177b69d5d74af707826353d45b15c034bc67503
size 4999813128
model-00005-of-00007.safetensors (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5505fcc82ae2e91c2ec02eef48445f640363edff6927979604be1cb599920a1d
size 4832007496
model-00006-of-00007.safetensors (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:53e6900c3349cf2a18b4003038ad8697e3edb3297051676f4b917d00da9048d2
size 4999813120
model-00007-of-00007.safetensors (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8f6e00aba3de82926c2fa91dbcf6a952267435264f560450d86af9d03db76741
size 2571158184
model.safetensors.index.json (new file, 298 lines)
@@ -0,0 +1,298 @@
{
  "metadata": {
    "total_size": 32121044992
  },
  "weight_map": {
    "lm_head.weight": "model-00007-of-00007.safetensors",
    "model.embed_tokens.weight": "model-00001-of-00007.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00007.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00007.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00007.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00007.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00003-of-00007.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00003-of-00007.safetensors",
    "model.layers.10.self_attn.k_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.10.self_attn.q_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.10.self_attn.v_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00003-of-00007.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00003-of-00007.safetensors",
    "model.layers.11.self_attn.k_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.11.self_attn.q_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.11.self_attn.v_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00003-of-00007.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00003-of-00007.safetensors",
    "model.layers.12.self_attn.k_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.12.self_attn.q_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.12.self_attn.v_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00003-of-00007.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00003-of-00007.safetensors",
    "model.layers.13.self_attn.k_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.13.self_attn.q_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.13.self_attn.v_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00004-of-00007.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00004-of-00007.safetensors",
    "model.layers.14.self_attn.k_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.14.self_attn.q_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.14.self_attn.v_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00004-of-00007.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.15.mlp.gate_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.15.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00004-of-00007.safetensors",
    "model.layers.15.self_attn.k_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.15.self_attn.q_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.15.self_attn.v_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00004-of-00007.safetensors",
    "model.layers.16.mlp.down_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.16.mlp.gate_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.16.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.16.post_attention_layernorm.weight": "model-00004-of-00007.safetensors",
    "model.layers.16.self_attn.k_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.16.self_attn.o_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.16.self_attn.q_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.16.self_attn.v_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.17.input_layernorm.weight": "model-00004-of-00007.safetensors",
    "model.layers.17.mlp.down_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.17.mlp.gate_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.17.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.17.post_attention_layernorm.weight": "model-00004-of-00007.safetensors",
    "model.layers.17.self_attn.k_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.17.self_attn.o_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.17.self_attn.q_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.17.self_attn.v_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.18.input_layernorm.weight": "model-00004-of-00007.safetensors",
    "model.layers.18.mlp.down_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.18.mlp.gate_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.18.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.18.post_attention_layernorm.weight": "model-00004-of-00007.safetensors",
    "model.layers.18.self_attn.k_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.18.self_attn.o_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.18.self_attn.q_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.18.self_attn.v_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.19.input_layernorm.weight": "model-00004-of-00007.safetensors",
    "model.layers.19.mlp.down_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.19.mlp.gate_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.19.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.19.post_attention_layernorm.weight": "model-00004-of-00007.safetensors",
    "model.layers.19.self_attn.k_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.19.self_attn.o_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.19.self_attn.q_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.19.self_attn.v_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.2.input_layernorm.weight": "model-00001-of-00007.safetensors",
    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00007.safetensors",
    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.20.input_layernorm.weight": "model-00005-of-00007.safetensors",
    "model.layers.20.mlp.down_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.20.mlp.gate_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.20.mlp.up_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.20.post_attention_layernorm.weight": "model-00005-of-00007.safetensors",
    "model.layers.20.self_attn.k_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.20.self_attn.o_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.20.self_attn.q_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.20.self_attn.v_proj.weight": "model-00004-of-00007.safetensors",
    "model.layers.21.input_layernorm.weight": "model-00005-of-00007.safetensors",
    "model.layers.21.mlp.down_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.21.mlp.gate_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.21.mlp.up_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.21.post_attention_layernorm.weight": "model-00005-of-00007.safetensors",
    "model.layers.21.self_attn.k_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.21.self_attn.o_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.21.self_attn.q_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.21.self_attn.v_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.22.input_layernorm.weight": "model-00005-of-00007.safetensors",
    "model.layers.22.mlp.down_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.22.mlp.gate_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.22.mlp.up_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.22.post_attention_layernorm.weight": "model-00005-of-00007.safetensors",
    "model.layers.22.self_attn.k_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.22.self_attn.o_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.22.self_attn.q_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.22.self_attn.v_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.23.input_layernorm.weight": "model-00005-of-00007.safetensors",
    "model.layers.23.mlp.down_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.23.mlp.gate_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.23.mlp.up_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.23.post_attention_layernorm.weight": "model-00005-of-00007.safetensors",
    "model.layers.23.self_attn.k_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.23.self_attn.o_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.23.self_attn.q_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.23.self_attn.v_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.24.input_layernorm.weight": "model-00005-of-00007.safetensors",
    "model.layers.24.mlp.down_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.24.mlp.gate_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.24.mlp.up_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.24.post_attention_layernorm.weight": "model-00005-of-00007.safetensors",
    "model.layers.24.self_attn.k_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.24.self_attn.o_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.24.self_attn.q_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.24.self_attn.v_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.25.input_layernorm.weight": "model-00006-of-00007.safetensors",
    "model.layers.25.mlp.down_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.25.mlp.gate_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.25.mlp.up_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.25.post_attention_layernorm.weight": "model-00006-of-00007.safetensors",
    "model.layers.25.self_attn.k_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.25.self_attn.o_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.25.self_attn.q_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.25.self_attn.v_proj.weight": "model-00005-of-00007.safetensors",
    "model.layers.26.input_layernorm.weight": "model-00006-of-00007.safetensors",
    "model.layers.26.mlp.down_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.26.mlp.gate_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.26.mlp.up_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.26.post_attention_layernorm.weight": "model-00006-of-00007.safetensors",
    "model.layers.26.self_attn.k_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.26.self_attn.o_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.26.self_attn.q_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.26.self_attn.v_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.27.input_layernorm.weight": "model-00006-of-00007.safetensors",
    "model.layers.27.mlp.down_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.27.mlp.gate_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.27.mlp.up_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.27.post_attention_layernorm.weight": "model-00006-of-00007.safetensors",
    "model.layers.27.self_attn.k_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.27.self_attn.o_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.27.self_attn.q_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.27.self_attn.v_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.28.input_layernorm.weight": "model-00006-of-00007.safetensors",
    "model.layers.28.mlp.down_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.28.mlp.gate_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.28.mlp.up_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.28.post_attention_layernorm.weight": "model-00006-of-00007.safetensors",
    "model.layers.28.self_attn.k_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.28.self_attn.o_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.28.self_attn.q_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.28.self_attn.v_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.29.input_layernorm.weight": "model-00006-of-00007.safetensors",
    "model.layers.29.mlp.down_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.29.mlp.gate_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.29.mlp.up_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.29.post_attention_layernorm.weight": "model-00006-of-00007.safetensors",
    "model.layers.29.self_attn.k_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.29.self_attn.o_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.29.self_attn.q_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.29.self_attn.v_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.3.input_layernorm.weight": "model-00002-of-00007.safetensors",
    "model.layers.3.mlp.down_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.3.mlp.gate_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.3.mlp.up_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.3.post_attention_layernorm.weight": "model-00002-of-00007.safetensors",
    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00007.safetensors",
    "model.layers.30.input_layernorm.weight": "model-00006-of-00007.safetensors",
    "model.layers.30.mlp.down_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.30.mlp.gate_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.30.mlp.up_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.30.post_attention_layernorm.weight": "model-00006-of-00007.safetensors",
    "model.layers.30.self_attn.k_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.30.self_attn.o_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.30.self_attn.q_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.30.self_attn.v_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.31.input_layernorm.weight": "model-00007-of-00007.safetensors",
    "model.layers.31.mlp.down_proj.weight": "model-00007-of-00007.safetensors",
    "model.layers.31.mlp.gate_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.31.mlp.up_proj.weight": "model-00007-of-00007.safetensors",
    "model.layers.31.post_attention_layernorm.weight": "model-00007-of-00007.safetensors",
    "model.layers.31.self_attn.k_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.31.self_attn.o_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.31.self_attn.q_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.31.self_attn.v_proj.weight": "model-00006-of-00007.safetensors",
    "model.layers.4.input_layernorm.weight": "model-00002-of-00007.safetensors",
    "model.layers.4.mlp.down_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.4.mlp.gate_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.4.mlp.up_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.4.post_attention_layernorm.weight": "model-00002-of-00007.safetensors",
    "model.layers.4.self_attn.k_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.4.self_attn.o_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.4.self_attn.q_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.4.self_attn.v_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.5.input_layernorm.weight": "model-00002-of-00007.safetensors",
    "model.layers.5.mlp.down_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.5.mlp.gate_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.5.mlp.up_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00007.safetensors",
    "model.layers.5.self_attn.k_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.5.self_attn.o_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.5.self_attn.q_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.5.self_attn.v_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.6.input_layernorm.weight": "model-00002-of-00007.safetensors",
    "model.layers.6.mlp.down_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.6.mlp.up_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00007.safetensors",
    "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.7.input_layernorm.weight": "model-00002-of-00007.safetensors",
    "model.layers.7.mlp.down_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.7.mlp.up_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00007.safetensors",
    "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.8.input_layernorm.weight": "model-00003-of-00007.safetensors",
    "model.layers.8.mlp.down_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.8.mlp.up_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.8.post_attention_layernorm.weight": "model-00003-of-00007.safetensors",
    "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00007.safetensors",
    "model.layers.9.input_layernorm.weight": "model-00003-of-00007.safetensors",
    "model.layers.9.mlp.down_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.9.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.9.mlp.up_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.9.post_attention_layernorm.weight": "model-00003-of-00007.safetensors",
    "model.layers.9.self_attn.k_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.9.self_attn.o_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.9.self_attn.q_proj.weight": "model-00003-of-00007.safetensors",
    "model.layers.9.self_attn.v_proj.weight": "model-00003-of-00007.safetensors",
    "model.norm.weight": "model-00007-of-00007.safetensors"
  }
}
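The index file above maps each tensor name to the shard that stores it, so a loader only has to open the shard it needs. A sketch of a single-tensor lookup with the safetensors library (assuming the shard files sit next to the index):

```python
import json
from safetensors import safe_open

# Locate and load one tensor via the weight map above.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.31.mlp.gate_proj.weight"
shard = index["weight_map"][name]  # -> "model-00006-of-00007.safetensors"
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)
print(shard, tuple(tensor.shape))
```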
sft.py (new file, 186 lines)
@@ -0,0 +1,186 @@
# flake8: noqa
"""
pip install -U transformers accelerate trl wandb wheel packaging peft bitsandbytes liger-kernel flash_attn

python sft.py \
    --run_name="llama3.1-8b-continued2" \
    --model_name_or_path="meta-llama/Meta-Llama-3.1-8B" \
    --dataset_name="mlfoundations/dclm-baseline-1.0-parquet,mlabonne/FineTome-100k" \
    --report_to="wandb" \
    --optim="adamw_torch_fused" \
    --lr_scheduler_type="cosine" \
    --max_steps=10000000 \
    --max_seq_length=64000 \
    --learning_rate=0.0001 \
    --attn_implementation="flash_attention_2" \
    --save_strategy="steps" \
    --save_steps 50 \
    --save_total_limit=10 \
    --per_device_train_batch_size=1 \
    --gradient_accumulation_steps=8 \
    --logging_steps=1 \
    --num_train_epochs=1 \
    --load_in_4bit \
    --push_to_hub \
    --hub_model_id="ericflo/Llama-3.1-8B-ContinuedTraining2-LoRA" \
    --hub_strategy="all_checkpoints" \
    --gradient_checkpointing \
    --use_peft \
    --lora_r=128 \
    --lora_alpha=256 \
    --lora_dropout=0.05 \
    --use_liger=true \
    --packing=true \
    --torch_dtype="bfloat16" \
    --output_dir="continuedtraining2_output"
"""

import logging
import os
import random
from contextlib import nullcontext

from trl.commands.cli_utils import init_zero_verbose, SFTScriptArguments, TrlParser
from trl.env_utils import strtobool

TRL_USE_RICH = strtobool(os.getenv("TRL_USE_RICH", "0"))

if TRL_USE_RICH:
    init_zero_verbose()
    FORMAT = "%(message)s"

    from rich.console import Console
    from rich.logging import RichHandler

import torch
from datasets import load_dataset, interleave_datasets

from tqdm.rich import tqdm
from transformers import AutoTokenizer

from trl import (
    ModelConfig,
    RichProgressCallback,
    SFTConfig,
    SFTTrainer,
    get_peft_config,
    get_quantization_config,
    get_kbit_device_map,
)

tqdm.pandas()

if TRL_USE_RICH:
    logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()], level=logging.INFO)

# Two chat templates: the Llama-3.1 Instruct ("Llama Chat") template and the
# ChatML template used by Hermes; formatting_prompts_func picks one at random
# per conversation so the model is not tied to a single format.
print("Loading tokenizers...")
METAML_TOK = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct")
CHATML_TOK = AutoTokenizer.from_pretrained("NousResearch/Hermes-3-Llama-3.1-8B")
print("Tokenizers loaded.")


def formatting_prompts_func(example):
    # Prefix each example with its stratum metadata (URL, title, repo name, ...)
    # as described in the README, then return the flattened training string.
    try:
        language = example.get('language')
        url = example.get('url')
        text = example.get('text')
        title = example.get('title')
        conversations = example.get('conversations')
        source = example.get('source')
        repo_name = example.get('max_stars_repo_name')
        repo_path = example.get('max_stars_repo_path')
        star_count = example.get('max_stars_count')
        content = example.get('content')
        if language and url and text:  # mlfoundations/dclm-baseline-1.0-parquet
            return f'{language} {url} {text}'
        elif title and url and text:  # wikimedia/wikipedia
            return f'{title} {url} {text}'
        elif conversations:  # mlabonne/FineTome-100k
            rows = [{
                "role": {"system": "system", "gpt": "assistant", "human": "user"}[row["from"]],
                "content": row["value"],
            } for row in conversations]
            tok = random.choice([METAML_TOK, CHATML_TOK])
            return f'{source} {tok.apply_chat_template(rows, tokenize=False)}'
        elif "max_stars_repo_name" in example:  # bigcode/starcoderdata
            return f'{example["max_stars_repo_name"]} {example["max_stars_repo_path"]} {example["max_stars_count"]} {example["content"]}'
        print(f"Unknown example: {example}")
        raise ValueError(f"Unknown example: {example}")
    except Exception as e:
        print(e)
        raise e


if __name__ == "__main__":
    parser = TrlParser((SFTScriptArguments, SFTConfig, ModelConfig))
    args, training_args, model_config = parser.parse_args_and_config()

    # Force use our print callback
    if TRL_USE_RICH:
        training_args.disable_tqdm = True
        console = Console()

    ################
    # Model init kwargs & Tokenizer
    ################
    model_config.lora_target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
    quantization_config = get_quantization_config(model_config)
    model_kwargs = dict(
        revision=model_config.model_revision,
        trust_remote_code=model_config.trust_remote_code,
        attn_implementation=model_config.attn_implementation,
        torch_dtype=model_config.torch_dtype,
        use_cache=False if training_args.gradient_checkpointing else True,
        device_map=get_kbit_device_map() if quantization_config is not None else None,
        quantization_config=quantization_config,
    )
    training_args.model_init_kwargs = model_kwargs
    tokenizer = AutoTokenizer.from_pretrained(
        model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code, use_fast=True
    )
    tokenizer.pad_token = tokenizer.eos_token

    ################
    # Dataset
    ################
    # Stream and interleave all strata; the first 100 interleaved examples
    # are held out for evaluation.
    dataset_names = args.dataset_name.split(',')
    train_datasets = [load_dataset(name, split="train", streaming=True) for name in dataset_names]
    train_datasets.append(load_dataset("bigcode/starcoderdata", data_dir="python", split="train", streaming=True))
    train_datasets.append(load_dataset("wikimedia/wikipedia", "20231101.en", split="train", streaming=True))
    train_datasets.append(load_dataset("wikimedia/wikipedia", "20231101.es", split="train", streaming=True))
    train_datasets.append(load_dataset("wikimedia/wikipedia", "20231101.fr", split="train", streaming=True))
    interleaved_dataset = interleave_datasets(train_datasets)
    eval_dataset = interleaved_dataset.take(100)
    train_dataset = interleaved_dataset.skip(100)

    print(train_dataset)
    print(eval_dataset)

    ################
    # Optional rich context managers
    ################
    init_context = nullcontext() if not TRL_USE_RICH else console.status("[bold green]Initializing the SFTTrainer...")
    save_context = (
        nullcontext()
        if not TRL_USE_RICH
        else console.status(f"[bold green]Training completed! Saving the model to {training_args.output_dir}")
    )

    ################
    # Training
    ################
    with init_context:
        trainer = SFTTrainer(
            model=model_config.model_name_or_path,
            args=training_args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            tokenizer=tokenizer,
            peft_config=get_peft_config(model_config),
            callbacks=[RichProgressCallback] if TRL_USE_RICH else None,
            formatting_func=formatting_prompts_func,
        )

    trainer.train()

    with save_context:
        trainer.save_model(training_args.output_dir)
special_tokens_map.json (new file, 16 lines)
@@ -0,0 +1,16 @@
{
  "bos_token": {
    "content": "<|begin_of_text|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|end_of_text|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json (new file, 410563 lines; diff suppressed because the file is too large)
tokenizer_config.json (new file, 2071 lines; diff suppressed because one or more lines are too long)