---
#model-type:
##e.g. gpt, phi, llama, chatglm, baichuan, etc.
#- gpt

#domain:
##e.g. nlp, cv, audio, multi-modal
#- nlp

#language:
##list of language codes: https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
#- cn

#metrics:
##e.g. CIDEr, BLEU, ROUGE, etc.
#- CIDEr

#tags:
##any custom tags, including training methods such as pretrained, fine-tuned, instruction-tuned, RL-tuned, and others
#- pretrained

#tools:
##e.g. vllm, fastchat, llamacpp, AdaSeq, etc.
#- vllm

license: llama3.1
base_model: Crystalcareai/Meta-llama-3.1-8b-instruct
tags:
- generated_from_trainer
model-index:
- name: outputs/out-myalee
  results: []
---

### The contributors of this model have not provided a more detailed model introduction. Model files and weights are available on the "Model Files" page.

#### You can download the model with the git clone command below, or via the ModelScope SDK

SDK download
```bash
# Install ModelScope
pip install modelscope
```
```python
# Download the model via the SDK
from modelscope import snapshot_download
model_dir = snapshot_download('arcee-ai/myalee-v3-L31-8B')
```
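
`snapshot_download` returns the local directory containing the downloaded weights; that path can be passed directly to downstream libraries such as Hugging Face `transformers` or vLLM, as sketched in the usage example further down this card.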

Git download
```bash
# Download the model via git
git clone https://www.modelscope.cn/arcee-ai/myalee-v3-L31-8B.git
```

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

[<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl)
<details><summary>See axolotl config</summary>

axolotl version: `0.4.1`
```yaml
base_model: Crystalcareai/Meta-llama-3.1-8b-instruct
model_type: AutoTokenizer
tokenizer_type: AutoTokenizer

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: /workspace/data/myalee
    type: alpaca
  - path: mlabonne/FineTome-100k
    type: sharegpt

chat_template: llama3
dataset_prepared_path: last_run_prepared
# val_set_size: 0.05
output_dir: ./outputs/out-myalee

sequence_len: 8192
sample_packing: true
pad_to_sequence_len: true

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

unfrozen_parameters:
- ^lm_head.weight$
- ^model.embed_tokens.weight$
# input_layernorm layers
- model.layers.0.input_layernorm
- model.layers.1.input_layernorm
- model.layers.2.input_layernorm
- model.layers.3.input_layernorm
- model.layers.4.input_layernorm
- model.layers.5.input_layernorm
- model.layers.6.input_layernorm
- model.layers.7.input_layernorm
- model.layers.8.input_layernorm
- model.layers.9.input_layernorm
- model.layers.10.input_layernorm
- model.layers.11.input_layernorm
- model.layers.12.input_layernorm
- model.layers.13.input_layernorm
- model.layers.14.input_layernorm
- model.layers.15.input_layernorm
# lm_head layers
# mlp.down_proj layers
- model.layers.1.mlp.down_proj
- model.layers.0.mlp.down_proj
- model.layers.30.mlp.down_proj
- model.layers.2.mlp.down_proj
- model.layers.21.mlp.down_proj
- model.layers.22.mlp.down_proj
- model.layers.29.mlp.down_proj
- model.layers.5.mlp.down_proj
- model.layers.4.mlp.down_proj
- model.layers.20.mlp.down_proj
- model.layers.23.mlp.down_proj
- model.layers.19.mlp.down_proj
- model.layers.3.mlp.down_proj
- model.layers.17.mlp.down_proj
- model.layers.6.mlp.down_proj
- model.layers.31.mlp.down_proj
# mlp.gate_proj layers
- model.layers.1.mlp.gate_proj
- model.layers.2.mlp.gate_proj
- model.layers.3.mlp.gate_proj
- model.layers.4.mlp.gate_proj
- model.layers.0.mlp.gate_proj
- model.layers.25.mlp.gate_proj
- model.layers.26.mlp.gate_proj
- model.layers.5.mlp.gate_proj
- model.layers.24.mlp.gate_proj
- model.layers.28.mlp.gate_proj
- model.layers.23.mlp.gate_proj
- model.layers.27.mlp.gate_proj
- model.layers.21.mlp.gate_proj
- model.layers.22.mlp.gate_proj
- model.layers.29.mlp.gate_proj
- model.layers.20.mlp.gate_proj
# mlp.up_proj layers
- model.layers.4.mlp.up_proj
- model.layers.3.mlp.up_proj
- model.layers.0.mlp.up_proj
- model.layers.5.mlp.up_proj
- model.layers.7.mlp.up_proj
- model.layers.6.mlp.up_proj
- model.layers.2.mlp.up_proj
- model.layers.1.mlp.up_proj
- model.layers.8.mlp.up_proj
- model.layers.12.mlp.up_proj
- model.layers.14.mlp.up_proj
- model.layers.9.mlp.up_proj
- model.layers.15.mlp.up_proj
- model.layers.17.mlp.up_proj
- model.layers.13.mlp.up_proj
- model.layers.19.mlp.up_proj
# model.embed_tokens layers
# model.norm layers
# post_attention_layernorm layers
- model.layers.0.post_attention_layernorm
- model.layers.1.post_attention_layernorm
- model.layers.2.post_attention_layernorm
- model.layers.3.post_attention_layernorm
- model.layers.4.post_attention_layernorm
- model.layers.5.post_attention_layernorm
- model.layers.6.post_attention_layernorm
- model.layers.7.post_attention_layernorm
- model.layers.8.post_attention_layernorm
- model.layers.9.post_attention_layernorm
- model.layers.10.post_attention_layernorm
- model.layers.11.post_attention_layernorm
- model.layers.12.post_attention_layernorm
- model.layers.13.post_attention_layernorm
- model.layers.14.post_attention_layernorm
- model.layers.15.post_attention_layernorm
# self_attn.k_proj layers
- model.layers.29.self_attn.k_proj
- model.layers.25.self_attn.k_proj
- model.layers.23.self_attn.k_proj
- model.layers.28.self_attn.k_proj
- model.layers.21.self_attn.k_proj
- model.layers.19.self_attn.k_proj
- model.layers.22.self_attn.k_proj
- model.layers.20.self_attn.k_proj
- model.layers.24.self_attn.k_proj
- model.layers.31.self_attn.k_proj
- model.layers.27.self_attn.k_proj
- model.layers.26.self_attn.k_proj
- model.layers.17.self_attn.k_proj
- model.layers.11.self_attn.k_proj
- model.layers.18.self_attn.k_proj
- model.layers.14.self_attn.k_proj
# self_attn.o_proj layers
- model.layers.14.self_attn.o_proj
- model.layers.7.self_attn.o_proj
- model.layers.5.self_attn.o_proj
- model.layers.11.self_attn.o_proj
- model.layers.6.self_attn.o_proj
- model.layers.24.self_attn.o_proj
- model.layers.9.self_attn.o_proj
- model.layers.13.self_attn.o_proj
- model.layers.10.self_attn.o_proj
- model.layers.12.self_attn.o_proj
- model.layers.8.self_attn.o_proj
- model.layers.25.self_attn.o_proj
- model.layers.21.self_attn.o_proj
- model.layers.23.self_attn.o_proj
- model.layers.15.self_attn.o_proj
- model.layers.16.self_attn.o_proj
# self_attn.q_proj layers
- model.layers.8.self_attn.q_proj
- model.layers.13.self_attn.q_proj
- model.layers.9.self_attn.q_proj
- model.layers.14.self_attn.q_proj
- model.layers.10.self_attn.q_proj
- model.layers.11.self_attn.q_proj
- model.layers.0.self_attn.q_proj
- model.layers.15.self_attn.q_proj
- model.layers.1.self_attn.q_proj
- model.layers.6.self_attn.q_proj
- model.layers.5.self_attn.q_proj
- model.layers.7.self_attn.q_proj
- model.layers.12.self_attn.q_proj
- model.layers.16.self_attn.q_proj
- model.layers.17.self_attn.q_proj
- model.layers.26.self_attn.q_proj
# self_attn.v_proj layers
- model.layers.26.self_attn.v_proj
- model.layers.17.self_attn.v_proj
- model.layers.3.self_attn.v_proj
- model.layers.28.self_attn.v_proj
- model.layers.29.self_attn.v_proj
- model.layers.21.self_attn.v_proj
- model.layers.15.self_attn.v_proj
- model.layers.16.self_attn.v_proj
- model.layers.20.self_attn.v_proj
- model.layers.25.self_attn.v_proj
- model.layers.6.self_attn.v_proj
- model.layers.23.self_attn.v_proj
- model.layers.4.self_attn.v_proj
- model.layers.1.self_attn.v_proj
- model.layers.22.self_attn.v_proj
- model.layers.14.self_attn.v_proj


gradient_accumulation_steps: 8
micro_batch_size: 1
num_epochs: 4
optimizer: adamw_torch_fused
lr_scheduler: cosine
learning_rate: 2e-5

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
early_stopping_patience:
resume_from_checkpoint:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 25
# evals_per_epoch: 2
eval_table_size:
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
  pad_token: <|end_of_text|>

```
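
To reproduce the run, a config like this is normally passed to the axolotl CLI, e.g. `accelerate launch -m axolotl.cli.train config.yaml` (the filename is a placeholder; this assumes axolotl `0.4.1` as noted above).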

<p style="color: lightgrey;">If you are a contributor to this model, we invite you to complete this model card in accordance with the <a href="https://modelscope.cn/docs/ModelScope%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%85%A5%E6%B5%81%E7%A8%8B%E6%A6%82%E8%A7%88" style="color: lightgrey; text-decoration: underline;">model contribution documentation</a>.</p>
</details><br>

# outputs/out-myalee

This model is a fine-tuned version of [Crystalcareai/Meta-llama-3.1-8b-instruct](https://huggingface.co/Crystalcareai/Meta-llama-3.1-8b-instruct), trained on the datasets listed in the axolotl config above: a local alpaca-format dataset (`/workspace/data/myalee`) and `mlabonne/FineTome-100k` (ShareGPT format).
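
The contributors did not include a usage example; the following is a minimal inference sketch, not an official recipe. It assumes the weights load with Hugging Face `transformers` (the card lists Transformers 4.43.1 below), that `torch` and `accelerate` are available for `device_map="auto"`, and that the tokenizer ships the Llama 3 chat template referenced by `chat_template: llama3` in the config above; the prompt is purely illustrative.

```python
# Hedged sketch: load the fine-tuned checkpoint and run a single chat turn.
from modelscope import snapshot_download
from transformers import AutoModelForCausalLM, AutoTokenizer

# Reuse the ModelScope snapshot shown earlier (a local git clone path works too).
model_dir = snapshot_download('arcee-ai/myalee-v3-L31-8B')
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForCausalLM.from_pretrained(model_dir, torch_dtype="auto", device_map="auto")

# Format the conversation with the tokenizer's chat template and generate.
messages = [{"role": "user", "content": "Give me a one-sentence summary of what fine-tuning is."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(inputs, max_new_tokens=128)
# Decode only the newly generated tokens, skipping the prompt.
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```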

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 8
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 25
- num_epochs: 4
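
The total train batch size of 8 is simply the per-device batch size (1) multiplied by the gradient accumulation steps (8), which implies training on a single device.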

### Training results

No evaluation results were logged: the config above leaves `val_set_size` and `evals_per_epoch` commented out, so training ran without an evaluation split.

### Framework versions

- Transformers 4.43.1
- Pytorch 2.3.1+cu121
- Datasets 2.19.1
- Tokenizers 0.19.1