diff --git a/.gitattributes b/.gitattributes
index d18ea26..467e083 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -45,3 +45,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.wasm filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+
+training_args.bin filter=lfs diff=lfs merge=lfs -text
+vocab.json filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
+model-00002-of-00004.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00003-of-00004.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00004-of-00004.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00001-of-00004.safetensors filter=lfs diff=lfs merge=lfs -text
+merges.txt filter=lfs diff=lfs merge=lfs -text
\ No newline at end of file
diff --git a/README.md b/README.md
index d3512d8..1524bec 100644
--- a/README.md
+++ b/README.md
@@ -1,48 +1,61 @@
 ---
-license: Apache License 2.0
-tags: []
-
-#model-type:
-## e.g. gpt, phi, llama, chatglm, baichuan
-#- gpt
-
-#domain:
-## e.g. nlp, cv, audio, multi-modal
-#- nlp
-
-#language:
-## list of language codes: https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
-#- cn
-
-#metrics:
-## e.g. CIDEr, BLEU, ROUGE
-#- CIDEr
-
-#tags:
-## custom tags, including training methods such as pretrained, fine-tuned, instruction-tuned, RL-tuned, and others
-#- pretrained
-
-#tools:
-## e.g. vllm, fastchat, llamacpp, AdaSeq
-#- vllm
+library_name: transformers
+license: apache-2.0
+base_model: Qwen/Qwen2.5-7B-Instruct
+tags:
+- llama-factory
+- full
+- generated_from_trainer
+model-index:
+- name: e1_code_fasttext_r1_1k
+  results: []
 ---
-### The contributors of this model have not provided a more detailed description. The model files and weights can be found on the "Model files" page.
-#### You can download the model with the git clone command below, or with the ModelScope SDK
-SDK download
-```bash
-# install ModelScope
-pip install modelscope
-```
-```python
-# SDK model download
-from modelscope import snapshot_download
-model_dir = snapshot_download('mlfoundations-dev/e1_code_fasttext_r1_1k')
-```
-Git download
-```
-# Git model download
-git clone https://www.modelscope.cn/mlfoundations-dev/e1_code_fasttext_r1_1k.git
-```
+
-
-If you are a contributor to this model, we invite you to complete the model card according to the model contribution documentation.
-
-
\ No newline at end of file
+# e1_code_fasttext_r1_1k
+
+This model is a fine-tuned version of [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) on the mlfoundations-dev/e1_code_fasttext_r1_1k dataset.
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 2e-05
+- train_batch_size: 1
+- eval_batch_size: 8
+- seed: 42
+- distributed_type: multi-GPU
+- num_devices: 4
+- gradient_accumulation_steps: 24
+- total_train_batch_size: 96
+- total_eval_batch_size: 32
+- optimizer: adamw_torch with betas=(0.9, 0.999) and epsilon=1e-08 (no additional optimizer arguments)
+- lr_scheduler_type: cosine
+- lr_scheduler_warmup_ratio: 0.1
+- num_epochs: 7.0
+
+### Training results
+
+
+
+### Framework versions
+
+- Transformers 4.46.1
+- PyTorch 2.6.0+cu124
+- Datasets 3.1.0
+- Tokenizers 0.20.3
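The regenerated card stops short of a usage example. The following is a minimal inference sketch, not an official snippet: it assumes the checkpoint is published under the `hub_model_id` set in `configs.yaml` further down (`mlfoundations-dev/e1_code_fasttext_r1_1k`) and that the standard `transformers` chat-model loading path applies.

```python
# Hedged sketch: load the fine-tuned checkpoint and run one chat turn.
# Swap the repo id for a local path if the weights are only on disk.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "mlfoundations-dev/e1_code_fasttext_r1_1k"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype="auto",   # config.json stores bfloat16 weights
    device_map="auto",
)

messages = [{"role": "user", "content": "Write a Python function that reverses a linked list."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(input_ids, max_new_tokens=512)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```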
diff --git a/added_tokens.json b/added_tokens.json
new file mode 100644
index 0000000..482ced4
--- /dev/null
+++ b/added_tokens.json
@@ -0,0 +1,24 @@
+{
+  "</tool_call>": 151658,
+  "<tool_call>": 151657,
+  "<|box_end|>": 151649,
+  "<|box_start|>": 151648,
+  "<|endoftext|>": 151643,
+  "<|file_sep|>": 151664,
+  "<|fim_middle|>": 151660,
+  "<|fim_pad|>": 151662,
+  "<|fim_prefix|>": 151659,
+  "<|fim_suffix|>": 151661,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644,
+  "<|image_pad|>": 151655,
+  "<|object_ref_end|>": 151647,
+  "<|object_ref_start|>": 151646,
+  "<|quad_end|>": 151651,
+  "<|quad_start|>": 151650,
+  "<|repo_name|>": 151663,
+  "<|video_pad|>": 151656,
+  "<|vision_end|>": 151653,
+  "<|vision_pad|>": 151654,
+  "<|vision_start|>": 151652
+}
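All of these ids sit above the base BPE vocabulary (which ends at 151642); `config.json` below pads the embedding matrix out to 152064. A quick hedged check of the mapping, assuming the tokenizer files have been fetched:

```python
# Hedged sketch: confirm added-token ids against added_tokens.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mlfoundations-dev/e1_code_fasttext_r1_1k")

assert tokenizer.convert_tokens_to_ids("<|im_end|>") == 151645
assert tokenizer.convert_tokens_to_ids("<tool_call>") == 151657

# The reverse direction works too:
print(tokenizer.convert_ids_to_tokens([151644, 151645]))  # ['<|im_start|>', '<|im_end|>']
```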
diff --git a/all_results.json b/all_results.json
new file mode 100644
index 0000000..75dca85
--- /dev/null
+++ b/all_results.json
@@ -0,0 +1,8 @@
+{
+    "epoch": 6.72,
+    "total_flos": 2.7617679424199066e+17,
+    "train_loss": 0.639291496362005,
+    "train_runtime": 11118.3141,
+    "train_samples_per_second": 0.63,
+    "train_steps_per_second": 0.006
+}
\ No newline at end of file
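These aggregates are internally consistent and also pin down the training-set size; the ~1k figure below is an inference from the numbers (and the dataset's `_1k` suffix), not something the card states:

```python
# Hedged sanity check of all_results.json (values copied from the file).
train_runtime = 11118.3141          # seconds
samples_per_sec = 0.63              # rounded rate as logged
steps_per_sec = 0.006               # rounded rate as logged
epochs = 6.72

total_samples = samples_per_sec * train_runtime   # ~7005 samples processed
total_steps = steps_per_sec * train_runtime       # ~67, i.e. the 70 steps in trainer_log.jsonl
print(total_samples / epochs)                     # ~1042 -> consistent with a ~1k-example dataset
```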
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..6c52571
--- /dev/null
+++ b/config.json
@@ -0,0 +1,29 @@
+{
+  "_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
+  "architectures": [
+    "Qwen2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 151643,
+  "eos_token_id": 151645,
+  "hidden_act": "silu",
+  "hidden_size": 3584,
+  "initializer_range": 0.02,
+  "intermediate_size": 18944,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 28,
+  "model_type": "qwen2",
+  "num_attention_heads": 28,
+  "num_hidden_layers": 28,
+  "num_key_value_heads": 4,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 1000000.0,
+  "sliding_window": null,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.46.1",
+  "use_cache": false,
+  "use_sliding_window": false,
+  "vocab_size": 152064
+}
diff --git a/configs.yaml b/configs.yaml
new file mode 100644
index 0000000..5f04853
--- /dev/null
+++ b/configs.yaml
@@ -0,0 +1,37 @@
+assistant_tag: gpt
+bf16: true
+content_tag: value
+cutoff_len: 16384
+dataloader_num_workers: 4
+dataloader_persistent_workers: true
+dataloader_pin_memory: true
+dataset: mlfoundations-dev/e1_code_fasttext_r1_1k
+dataset_dir: ONLINE
+ddp_timeout: 180000000
+deepspeed: dcft/train/zero3.json
+do_train: true
+enable_liger_kernel: true
+finetuning_type: full
+global_batch_size: 96
+gradient_accumulation_steps: 24
+hub_model_id: mlfoundations-dev/e1_code_fasttext_r1_1k
+learning_rate: 2.0e-05
+logging_steps: 1
+lr_scheduler_type: cosine
+messages: conversations
+model_name_or_path: Qwen/Qwen2.5-7B-Instruct
+num_train_epochs: 7.0
+output_dir: /data/cat/ws/ryma833h-dcft/checkpoints/e1_code_fasttext_r1_1k
+overwrite_cache: true
+per_device_train_batch_size: 1
+plot_loss: true
+preprocessing_num_workers: 16
+push_to_db: true
+push_to_hub: true
+report_to: wandb
+role_tag: from
+save_strategy: epoch
+stage: sft
+template: qwen25
+user_tag: human
+warmup_ratio: 0.1
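The `llama-factory` tag and this YAML suggest the run was launched with LLaMA-Factory (plausibly something like `llamafactory-cli train configs.yaml`, though the exact entry point is an assumption). The batch-size fields compose as follows, and they explain the 70-step run recorded in `trainer_log.jsonl` further down:

```python
# Hedged arithmetic: how the config's batch settings yield 70 total steps.
per_device_train_batch_size = 1
gradient_accumulation_steps = 24
num_devices = 4                      # from the model card (distributed_type: multi-GPU)

effective_batch = per_device_train_batch_size * gradient_accumulation_steps * num_devices
assert effective_batch == 96         # matches global_batch_size / total_train_batch_size

dataset_size = 1000                  # assumption: the "_1k" dataset suffix
steps_per_epoch = dataset_size // effective_batch   # 10 (drop-last style rounding assumed)
print(steps_per_epoch * 7)                          # 70 total steps, as logged
print(70 * effective_batch / dataset_size)          # 6.72, exactly the final "epoch" in all_results.json
```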
diff --git a/configuration.json b/configuration.json
new file mode 100644
index 0000000..bbeeda1
--- /dev/null
+++ b/configuration.json
@@ -0,0 +1 @@
+{"framework": "pytorch", "task": "text-generation", "allow_remote": true}
\ No newline at end of file
diff --git a/generation_config.json b/generation_config.json
new file mode 100644
index 0000000..a753841
--- /dev/null
+++ b/generation_config.json
@@ -0,0 +1,14 @@
+{
+  "bos_token_id": 151643,
+  "do_sample": true,
+  "eos_token_id": [
+    151645,
+    151643
+  ],
+  "pad_token_id": 151643,
+  "repetition_penalty": 1.05,
+  "temperature": 0.7,
+  "top_k": 20,
+  "top_p": 0.8,
+  "transformers_version": "4.46.1"
+}
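`generate()` reads these sampling defaults automatically when the checkpoint is loaded, and per-call arguments override them. A short hedged sketch, reusing `model` and `input_ids` from the loading example above:

```python
# Hedged sketch: inspect and override the repo's generation defaults.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("mlfoundations-dev/e1_code_fasttext_r1_1k")
print(gen_cfg.temperature, gen_cfg.top_p, gen_cfg.top_k)  # 0.7 0.8 20

# Per-call overrides win over generation_config.json:
output = model.generate(
    input_ids,
    max_new_tokens=256,
    temperature=0.2,        # e.g. tighter sampling for code
    repetition_penalty=1.05,
)
```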
diff --git a/merges.txt b/merges.txt
new file mode 100644
index 0000000..80c1a19
--- /dev/null
+++ b/merges.txt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8831e4f1a044471340f7c0a83d7bd71306a5b867e95fd870f74d0c5308a904d5
+size 1671853
diff --git a/model-00001-of-00004.safetensors b/model-00001-of-00004.safetensors
new file mode 100644
index 0000000..f6657b5
--- /dev/null
+++ b/model-00001-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2569f011270995f82521249f9677034c9d332ddd16bf6cbe6839918ad091138a
+size 4877660776
diff --git a/model-00002-of-00004.safetensors b/model-00002-of-00004.safetensors
new file mode 100644
index 0000000..7d89367
--- /dev/null
+++ b/model-00002-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8b35e427e97e39113e9b6dd71835b21fc68185bbda5af515d94ad5f43b39981
+size 4932751008
diff --git a/model-00003-of-00004.safetensors b/model-00003-of-00004.safetensors
new file mode 100644
index 0000000..008476a
--- /dev/null
+++ b/model-00003-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:079d9839d51b461dea0e996065f7fcabf77888ac6c4c1550e3184eb9d9d69305
+size 4330865200
diff --git a/model-00004-of-00004.safetensors b/model-00004-of-00004.safetensors
new file mode 100644
index 0000000..7c3d43c
--- /dev/null
+++ b/model-00004-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee93cdf62d0582cf3cc195cac2270f14f04a3252ebaa0905a7cd7030d430b1c5
+size 1089994880
diff --git a/model.safetensors.index.json b/model.safetensors.index.json
new file mode 100644
index 0000000..6ca5084
--- /dev/null
+++ b/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+  "metadata": {
+    "total_size": 15231233024
+  },
+  "weight_map": {
+    "lm_head.weight": "model-00004-of-00004.safetensors",
+    "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + 
"model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + 
"model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.18.self_attn.k_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.self_attn.q_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.self_attn.v_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + 
"model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + 
"model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + 
"model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.post_attention_layernorm.weight": 
"model-00001-of-00004.safetensors", + "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors", + 
"model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors", + "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.norm.weight": "model-00003-of-00004.safetensors" + } +} diff --git a/special_tokens_map.json b/special_tokens_map.json new file mode 100644 index 0000000..17305b3 --- /dev/null +++ b/special_tokens_map.json @@ -0,0 +1,31 @@ +{ + "additional_special_tokens": [ + "<|im_start|>", + "<|im_end|>", + "<|object_ref_start|>", + "<|object_ref_end|>", + "<|box_start|>", + "<|box_end|>", + "<|quad_start|>", + "<|quad_end|>", + "<|vision_start|>", + "<|vision_end|>", + "<|vision_pad|>", + "<|image_pad|>", + "<|video_pad|>" + ], + "eos_token": { + "content": "<|endoftext|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": { + "content": "<|endoftext|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } +} diff --git a/tokenizer.json b/tokenizer.json new file mode 100644 index 0000000..51ebb3b --- /dev/null +++ b/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa +size 11421896 diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000..b84f53a --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,208 @@ +{ + "add_bos_token": false, + "add_prefix_space": false, + "added_tokens_decoder": { + "151643": { + "content": "<|endoftext|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151644": { + "content": "<|im_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151645": { + "content": "<|im_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, 
+ "single_word": false, + "special": true + }, + "151646": { + "content": "<|object_ref_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151647": { + "content": "<|object_ref_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151648": { + "content": "<|box_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151649": { + "content": "<|box_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151650": { + "content": "<|quad_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151651": { + "content": "<|quad_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151652": { + "content": "<|vision_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151653": { + "content": "<|vision_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151654": { + "content": "<|vision_pad|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151655": { + "content": "<|image_pad|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151656": { + "content": "<|video_pad|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151657": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151658": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151659": { + "content": "<|fim_prefix|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151660": { + "content": "<|fim_middle|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151661": { + "content": "<|fim_suffix|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151662": { + "content": "<|fim_pad|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151663": { + "content": "<|repo_name|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151664": { + "content": "<|file_sep|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + } + }, + "additional_special_tokens": [ + "<|im_start|>", + "<|im_end|>", + "<|object_ref_start|>", + "<|object_ref_end|>", + "<|box_start|>", + "<|box_end|>", + "<|quad_start|>", + "<|quad_end|>", + "<|vision_start|>", + "<|vision_end|>", + "<|vision_pad|>", + "<|image_pad|>", + "<|video_pad|>" + ], + "bos_token": null, + "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' 
}}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": , \\\"arguments\\\": }\\n<|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n\\n' }}\n {{- message.content }}\n {{- '\\n' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n", + "clean_up_tokenization_spaces": false, + "eos_token": "<|endoftext|>", + "errors": "replace", + "model_max_length": 131072, + "pad_token": "<|endoftext|>", + "padding_side": "right", + "split_special_tokens": false, + "tokenizer_class": "Qwen2Tokenizer", + "unk_token": null +} diff --git a/train_results.json b/train_results.json new file mode 100644 index 0000000..75dca85 --- /dev/null +++ b/train_results.json @@ -0,0 +1,8 @@ +{ + "epoch": 6.72, + "total_flos": 2.7617679424199066e+17, + "train_loss": 0.639291496362005, + "train_runtime": 11118.3141, + "train_samples_per_second": 0.63, + "train_steps_per_second": 0.006 +} \ No newline at end of file diff --git a/trainer_log.jsonl b/trainer_log.jsonl new file mode 100644 index 0000000..58d3f6b --- /dev/null +++ b/trainer_log.jsonl @@ -0,0 +1,71 @@ +{"current_steps": 1, "total_steps": 70, "loss": 1.0519, "lr": 2.8571428571428573e-06, "epoch": 0.096, "percentage": 1.43, "elapsed_time": "0:02:34", "remaining_time": "2:57:49"} +{"current_steps": 2, "total_steps": 70, "loss": 1.0335, "lr": 5.7142857142857145e-06, "epoch": 0.192, "percentage": 2.86, "elapsed_time": "0:05:08", "remaining_time": "2:54:35"} +{"current_steps": 3, "total_steps": 70, "loss": 1.0137, "lr": 8.571428571428571e-06, "epoch": 0.288, "percentage": 4.29, "elapsed_time": "0:07:35", "remaining_time": "2:49:28"} +{"current_steps": 4, "total_steps": 70, "loss": 0.9749, "lr": 1.1428571428571429e-05, "epoch": 0.384, "percentage": 5.71, "elapsed_time": "0:10:04", "remaining_time": "2:46:13"} +{"current_steps": 5, "total_steps": 70, "loss": 0.9427, "lr": 1.4285714285714287e-05, 
"epoch": 0.48, "percentage": 7.14, "elapsed_time": "0:12:38", "remaining_time": "2:44:23"} +{"current_steps": 6, "total_steps": 70, "loss": 0.9398, "lr": 1.7142857142857142e-05, "epoch": 0.576, "percentage": 8.57, "elapsed_time": "0:15:23", "remaining_time": "2:44:08"} +{"current_steps": 7, "total_steps": 70, "loss": 0.9153, "lr": 2e-05, "epoch": 0.672, "percentage": 10.0, "elapsed_time": "0:17:45", "remaining_time": "2:39:47"} +{"current_steps": 8, "total_steps": 70, "loss": 0.9034, "lr": 1.9987569212189224e-05, "epoch": 0.768, "percentage": 11.43, "elapsed_time": "0:20:17", "remaining_time": "2:37:16"} +{"current_steps": 9, "total_steps": 70, "loss": 0.8591, "lr": 1.9950307753654016e-05, "epoch": 0.864, "percentage": 12.86, "elapsed_time": "0:22:57", "remaining_time": "2:35:34"} +{"current_steps": 10, "total_steps": 70, "loss": 0.8049, "lr": 1.9888308262251286e-05, "epoch": 0.96, "percentage": 14.29, "elapsed_time": "0:25:24", "remaining_time": "2:32:28"} +{"current_steps": 11, "total_steps": 70, "loss": 0.7908, "lr": 1.9801724878485438e-05, "epoch": 1.056, "percentage": 15.71, "elapsed_time": "0:28:31", "remaining_time": "2:33:00"} +{"current_steps": 12, "total_steps": 70, "loss": 0.7886, "lr": 1.969077286229078e-05, "epoch": 1.152, "percentage": 17.14, "elapsed_time": "0:30:56", "remaining_time": "2:29:34"} +{"current_steps": 13, "total_steps": 70, "loss": 0.7647, "lr": 1.955572805786141e-05, "epoch": 1.248, "percentage": 18.57, "elapsed_time": "0:33:35", "remaining_time": "2:27:15"} +{"current_steps": 14, "total_steps": 70, "loss": 0.7351, "lr": 1.9396926207859085e-05, "epoch": 1.3439999999999999, "percentage": 20.0, "elapsed_time": "0:36:04", "remaining_time": "2:24:18"} +{"current_steps": 15, "total_steps": 70, "loss": 0.745, "lr": 1.921476211870408e-05, "epoch": 1.44, "percentage": 21.43, "elapsed_time": "0:38:40", "remaining_time": "2:21:49"} +{"current_steps": 16, "total_steps": 70, "loss": 0.7438, "lr": 1.900968867902419e-05, "epoch": 1.536, "percentage": 22.86, "elapsed_time": "0:41:08", "remaining_time": "2:18:50"} +{"current_steps": 17, "total_steps": 70, "loss": 0.7215, "lr": 1.8782215733702286e-05, "epoch": 1.6320000000000001, "percentage": 24.29, "elapsed_time": "0:43:46", "remaining_time": "2:16:27"} +{"current_steps": 18, "total_steps": 70, "loss": 0.7282, "lr": 1.8532908816321557e-05, "epoch": 1.728, "percentage": 25.71, "elapsed_time": "0:46:19", "remaining_time": "2:13:49"} +{"current_steps": 19, "total_steps": 70, "loss": 0.7154, "lr": 1.826238774315995e-05, "epoch": 1.8239999999999998, "percentage": 27.14, "elapsed_time": "0:48:45", "remaining_time": "2:10:53"} +{"current_steps": 20, "total_steps": 70, "loss": 0.6982, "lr": 1.7971325072229227e-05, "epoch": 1.92, "percentage": 28.57, "elapsed_time": "0:51:07", "remaining_time": "2:07:48"} +{"current_steps": 21, "total_steps": 70, "loss": 0.7014, "lr": 1.766044443118978e-05, "epoch": 2.016, "percentage": 30.0, "elapsed_time": "0:54:32", "remaining_time": "2:07:15"} +{"current_steps": 22, "total_steps": 70, "loss": 0.6724, "lr": 1.7330518718298263e-05, "epoch": 2.112, "percentage": 31.43, "elapsed_time": "0:57:02", "remaining_time": "2:04:27"} +{"current_steps": 23, "total_steps": 70, "loss": 0.6739, "lr": 1.698236818086073e-05, "epoch": 2.208, "percentage": 32.86, "elapsed_time": "0:59:35", "remaining_time": "2:01:46"} +{"current_steps": 24, "total_steps": 70, "loss": 0.6576, "lr": 1.6616858375968596e-05, "epoch": 2.304, "percentage": 34.29, "elapsed_time": "1:02:21", "remaining_time": "1:59:32"} +{"current_steps": 
25, "total_steps": 70, "loss": 0.6357, "lr": 1.6234898018587336e-05, "epoch": 2.4, "percentage": 35.71, "elapsed_time": "1:04:48", "remaining_time": "1:56:38"} +{"current_steps": 26, "total_steps": 70, "loss": 0.6485, "lr": 1.5837436722347902e-05, "epoch": 2.496, "percentage": 37.14, "elapsed_time": "1:07:17", "remaining_time": "1:53:53"} +{"current_steps": 27, "total_steps": 70, "loss": 0.6373, "lr": 1.5425462638657597e-05, "epoch": 2.592, "percentage": 38.57, "elapsed_time": "1:09:55", "remaining_time": "1:51:22"} +{"current_steps": 28, "total_steps": 70, "loss": 0.6327, "lr": 1.5000000000000002e-05, "epoch": 2.6879999999999997, "percentage": 40.0, "elapsed_time": "1:12:18", "remaining_time": "1:48:27"} +{"current_steps": 29, "total_steps": 70, "loss": 0.638, "lr": 1.4562106573531632e-05, "epoch": 2.784, "percentage": 41.43, "elapsed_time": "1:15:00", "remaining_time": "1:46:02"} +{"current_steps": 30, "total_steps": 70, "loss": 0.6268, "lr": 1.4112871031306118e-05, "epoch": 2.88, "percentage": 42.86, "elapsed_time": "1:17:31", "remaining_time": "1:43:22"} +{"current_steps": 31, "total_steps": 70, "loss": 0.6262, "lr": 1.3653410243663953e-05, "epoch": 2.976, "percentage": 44.29, "elapsed_time": "1:20:07", "remaining_time": "1:40:47"} +{"current_steps": 32, "total_steps": 70, "loss": 0.5969, "lr": 1.3184866502516846e-05, "epoch": 3.072, "percentage": 45.71, "elapsed_time": "1:23:27", "remaining_time": "1:39:06"} +{"current_steps": 33, "total_steps": 70, "loss": 0.5954, "lr": 1.2708404681430054e-05, "epoch": 3.168, "percentage": 47.14, "elapsed_time": "1:26:02", "remaining_time": "1:36:27"} +{"current_steps": 34, "total_steps": 70, "loss": 0.5805, "lr": 1.2225209339563144e-05, "epoch": 3.2640000000000002, "percentage": 48.57, "elapsed_time": "1:28:31", "remaining_time": "1:33:43"} +{"current_steps": 35, "total_steps": 70, "loss": 0.5929, "lr": 1.1736481776669307e-05, "epoch": 3.36, "percentage": 50.0, "elapsed_time": "1:31:09", "remaining_time": "1:31:09"} +{"current_steps": 36, "total_steps": 70, "loss": 0.5865, "lr": 1.1243437046474854e-05, "epoch": 3.456, "percentage": 51.43, "elapsed_time": "1:33:35", "remaining_time": "1:28:23"} +{"current_steps": 37, "total_steps": 70, "loss": 0.5861, "lr": 1.0747300935864245e-05, "epoch": 3.552, "percentage": 52.86, "elapsed_time": "1:36:19", "remaining_time": "1:25:54"} +{"current_steps": 38, "total_steps": 70, "loss": 0.5732, "lr": 1.0249306917380731e-05, "epoch": 3.648, "percentage": 54.29, "elapsed_time": "1:38:50", "remaining_time": "1:23:13"} +{"current_steps": 39, "total_steps": 70, "loss": 0.5584, "lr": 9.750693082619274e-06, "epoch": 3.7439999999999998, "percentage": 55.71, "elapsed_time": "1:41:32", "remaining_time": "1:20:42"} +{"current_steps": 40, "total_steps": 70, "loss": 0.5548, "lr": 9.252699064135759e-06, "epoch": 3.84, "percentage": 57.14, "elapsed_time": "1:43:46", "remaining_time": "1:17:49"} +{"current_steps": 41, "total_steps": 70, "loss": 0.5655, "lr": 8.756562953525151e-06, "epoch": 3.936, "percentage": 58.57, "elapsed_time": "1:46:21", "remaining_time": "1:15:13"} +{"current_steps": 42, "total_steps": 70, "loss": 0.5633, "lr": 8.263518223330698e-06, "epoch": 4.032, "percentage": 60.0, "elapsed_time": "1:49:38", "remaining_time": "1:13:05"} +{"current_steps": 43, "total_steps": 70, "loss": 0.5378, "lr": 7.774790660436857e-06, "epoch": 4.128, "percentage": 61.43, "elapsed_time": "1:51:53", "remaining_time": "1:10:15"} +{"current_steps": 44, "total_steps": 70, "loss": 0.543, "lr": 7.291595318569951e-06, "epoch": 4.224, 
"percentage": 62.86, "elapsed_time": "1:54:27", "remaining_time": "1:07:38"} +{"current_steps": 45, "total_steps": 70, "loss": 0.5347, "lr": 6.815133497483157e-06, "epoch": 4.32, "percentage": 64.29, "elapsed_time": "1:56:58", "remaining_time": "1:04:59"} +{"current_steps": 46, "total_steps": 70, "loss": 0.5447, "lr": 6.34658975633605e-06, "epoch": 4.416, "percentage": 65.71, "elapsed_time": "1:59:19", "remaining_time": "1:02:15"} +{"current_steps": 47, "total_steps": 70, "loss": 0.5287, "lr": 5.887128968693887e-06, "epoch": 4.5120000000000005, "percentage": 67.14, "elapsed_time": "2:01:51", "remaining_time": "0:59:38"} +{"current_steps": 48, "total_steps": 70, "loss": 0.5415, "lr": 5.43789342646837e-06, "epoch": 4.608, "percentage": 68.57, "elapsed_time": "2:04:25", "remaining_time": "0:57:01"} +{"current_steps": 49, "total_steps": 70, "loss": 0.5299, "lr": 5.000000000000003e-06, "epoch": 4.704, "percentage": 70.0, "elapsed_time": "2:07:06", "remaining_time": "0:54:28"} +{"current_steps": 50, "total_steps": 70, "loss": 0.54, "lr": 4.5745373613424075e-06, "epoch": 4.8, "percentage": 71.43, "elapsed_time": "2:09:48", "remaining_time": "0:51:55"} +{"current_steps": 51, "total_steps": 70, "loss": 0.5145, "lr": 4.162563277652104e-06, "epoch": 4.896, "percentage": 72.86, "elapsed_time": "2:12:29", "remaining_time": "0:49:21"} +{"current_steps": 52, "total_steps": 70, "loss": 0.522, "lr": 3.7651019814126656e-06, "epoch": 4.992, "percentage": 74.29, "elapsed_time": "2:15:04", "remaining_time": "0:46:45"} +{"current_steps": 53, "total_steps": 70, "loss": 0.5161, "lr": 3.3831416240314085e-06, "epoch": 5.088, "percentage": 75.71, "elapsed_time": "2:18:41", "remaining_time": "0:44:29"} +{"current_steps": 54, "total_steps": 70, "loss": 0.5171, "lr": 3.017631819139273e-06, "epoch": 5.184, "percentage": 77.14, "elapsed_time": "2:21:19", "remaining_time": "0:41:52"} +{"current_steps": 55, "total_steps": 70, "loss": 0.497, "lr": 2.669481281701739e-06, "epoch": 5.28, "percentage": 78.57, "elapsed_time": "2:23:56", "remaining_time": "0:39:15"} +{"current_steps": 56, "total_steps": 70, "loss": 0.5036, "lr": 2.339555568810221e-06, "epoch": 5.376, "percentage": 80.0, "elapsed_time": "2:26:21", "remaining_time": "0:36:35"} +{"current_steps": 57, "total_steps": 70, "loss": 0.5076, "lr": 2.0286749277707783e-06, "epoch": 5.4719999999999995, "percentage": 81.43, "elapsed_time": "2:29:00", "remaining_time": "0:33:59"} +{"current_steps": 58, "total_steps": 70, "loss": 0.5087, "lr": 1.7376122568400533e-06, "epoch": 5.568, "percentage": 82.86, "elapsed_time": "2:31:27", "remaining_time": "0:31:20"} +{"current_steps": 59, "total_steps": 70, "loss": 0.4892, "lr": 1.467091183678444e-06, "epoch": 5.664, "percentage": 84.29, "elapsed_time": "2:34:00", "remaining_time": "0:28:42"} +{"current_steps": 60, "total_steps": 70, "loss": 0.5286, "lr": 1.2177842662977136e-06, "epoch": 5.76, "percentage": 85.71, "elapsed_time": "2:36:36", "remaining_time": "0:26:06"} +{"current_steps": 61, "total_steps": 70, "loss": 0.5088, "lr": 9.903113209758098e-07, "epoch": 5.856, "percentage": 87.14, "elapsed_time": "2:39:08", "remaining_time": "0:23:28"} +{"current_steps": 62, "total_steps": 70, "loss": 0.4946, "lr": 7.852378812959227e-07, "epoch": 5.952, "percentage": 88.57, "elapsed_time": "2:41:36", "remaining_time": "0:20:51"} +{"current_steps": 63, "total_steps": 70, "loss": 0.4894, "lr": 6.030737921409169e-07, "epoch": 6.048, "percentage": 90.0, "elapsed_time": "2:45:26", "remaining_time": "0:18:22"} +{"current_steps": 64, "total_steps": 
70, "loss": 0.5078, "lr": 4.4427194213859216e-07, "epoch": 6.144, "percentage": 91.43, "elapsed_time": "2:47:54", "remaining_time": "0:15:44"} +{"current_steps": 65, "total_steps": 70, "loss": 0.4851, "lr": 3.0922713770922155e-07, "epoch": 6.24, "percentage": 92.86, "elapsed_time": "2:50:23", "remaining_time": "0:13:06"} +{"current_steps": 66, "total_steps": 70, "loss": 0.5004, "lr": 1.9827512151456175e-07, "epoch": 6.336, "percentage": 94.29, "elapsed_time": "2:52:44", "remaining_time": "0:10:28"} +{"current_steps": 67, "total_steps": 70, "loss": 0.4937, "lr": 1.1169173774871478e-07, "epoch": 6.432, "percentage": 95.71, "elapsed_time": "2:55:19", "remaining_time": "0:07:51"} +{"current_steps": 68, "total_steps": 70, "loss": 0.497, "lr": 4.9692246345985905e-08, "epoch": 6.5280000000000005, "percentage": 97.14, "elapsed_time": "2:57:59", "remaining_time": "0:05:14"} +{"current_steps": 69, "total_steps": 70, "loss": 0.4973, "lr": 1.2430787810776556e-08, "epoch": 6.624, "percentage": 98.57, "elapsed_time": "3:00:24", "remaining_time": "0:02:36"} +{"current_steps": 70, "total_steps": 70, "loss": 0.4974, "lr": 0.0, "epoch": 6.72, "percentage": 100.0, "elapsed_time": "3:03:02", "remaining_time": "0:00:00"} +{"current_steps": 70, "total_steps": 70, "epoch": 6.72, "percentage": 100.0, "elapsed_time": "3:05:16", "remaining_time": "0:00:00"} diff --git a/trainer_state.json b/trainer_state.json new file mode 100644 index 0000000..ced15fc --- /dev/null +++ b/trainer_state.json @@ -0,0 +1,532 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 6.72, + "eval_steps": 500, + "global_step": 70, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.096, + "grad_norm": 6.592547259478091, + "learning_rate": 2.8571428571428573e-06, + "loss": 1.0519, + "step": 1 + }, + { + "epoch": 0.192, + "grad_norm": 6.567455308840202, + "learning_rate": 5.7142857142857145e-06, + "loss": 1.0335, + "step": 2 + }, + { + "epoch": 0.288, + "grad_norm": 5.940088825094661, + "learning_rate": 8.571428571428571e-06, + "loss": 1.0137, + "step": 3 + }, + { + "epoch": 0.384, + "grad_norm": 2.8490940172517343, + "learning_rate": 1.1428571428571429e-05, + "loss": 0.9749, + "step": 4 + }, + { + "epoch": 0.48, + "grad_norm": 4.051471478703239, + "learning_rate": 1.4285714285714287e-05, + "loss": 0.9427, + "step": 5 + }, + { + "epoch": 0.576, + "grad_norm": 4.064845725783386, + "learning_rate": 1.7142857142857142e-05, + "loss": 0.9398, + "step": 6 + }, + { + "epoch": 0.672, + "grad_norm": 4.123631763256305, + "learning_rate": 2e-05, + "loss": 0.9153, + "step": 7 + }, + { + "epoch": 0.768, + "grad_norm": 3.0784568570852775, + "learning_rate": 1.9987569212189224e-05, + "loss": 0.9034, + "step": 8 + }, + { + "epoch": 0.864, + "grad_norm": 2.2573484073375982, + "learning_rate": 1.9950307753654016e-05, + "loss": 0.8591, + "step": 9 + }, + { + "epoch": 0.96, + "grad_norm": 1.5770602873647688, + "learning_rate": 1.9888308262251286e-05, + "loss": 0.8049, + "step": 10 + }, + { + "epoch": 1.056, + "grad_norm": 1.276352557519139, + "learning_rate": 1.9801724878485438e-05, + "loss": 0.7908, + "step": 11 + }, + { + "epoch": 1.152, + "grad_norm": 1.4262974839409701, + "learning_rate": 1.969077286229078e-05, + "loss": 0.7886, + "step": 12 + }, + { + "epoch": 1.248, + "grad_norm": 1.4161139727150824, + "learning_rate": 1.955572805786141e-05, + "loss": 0.7647, + "step": 13 + }, + { + "epoch": 1.3439999999999999, + "grad_norm": 0.985317354399831, + 
"learning_rate": 1.9396926207859085e-05, + "loss": 0.7351, + "step": 14 + }, + { + "epoch": 1.44, + "grad_norm": 1.1731937626326456, + "learning_rate": 1.921476211870408e-05, + "loss": 0.745, + "step": 15 + }, + { + "epoch": 1.536, + "grad_norm": 1.2342351925190727, + "learning_rate": 1.900968867902419e-05, + "loss": 0.7438, + "step": 16 + }, + { + "epoch": 1.6320000000000001, + "grad_norm": 0.8586648305989024, + "learning_rate": 1.8782215733702286e-05, + "loss": 0.7215, + "step": 17 + }, + { + "epoch": 1.728, + "grad_norm": 1.2623342706689424, + "learning_rate": 1.8532908816321557e-05, + "loss": 0.7282, + "step": 18 + }, + { + "epoch": 1.8239999999999998, + "grad_norm": 1.0216036051872144, + "learning_rate": 1.826238774315995e-05, + "loss": 0.7154, + "step": 19 + }, + { + "epoch": 1.92, + "grad_norm": 0.8625523662853701, + "learning_rate": 1.7971325072229227e-05, + "loss": 0.6982, + "step": 20 + }, + { + "epoch": 2.016, + "grad_norm": 0.9240003298803581, + "learning_rate": 1.766044443118978e-05, + "loss": 0.7014, + "step": 21 + }, + { + "epoch": 2.112, + "grad_norm": 0.7872081733723811, + "learning_rate": 1.7330518718298263e-05, + "loss": 0.6724, + "step": 22 + }, + { + "epoch": 2.208, + "grad_norm": 0.6179867079850143, + "learning_rate": 1.698236818086073e-05, + "loss": 0.6739, + "step": 23 + }, + { + "epoch": 2.304, + "grad_norm": 0.7504143540003096, + "learning_rate": 1.6616858375968596e-05, + "loss": 0.6576, + "step": 24 + }, + { + "epoch": 2.4, + "grad_norm": 0.6238627830885334, + "learning_rate": 1.6234898018587336e-05, + "loss": 0.6357, + "step": 25 + }, + { + "epoch": 2.496, + "grad_norm": 0.787718950541536, + "learning_rate": 1.5837436722347902e-05, + "loss": 0.6485, + "step": 26 + }, + { + "epoch": 2.592, + "grad_norm": 0.5841746630896626, + "learning_rate": 1.5425462638657597e-05, + "loss": 0.6373, + "step": 27 + }, + { + "epoch": 2.6879999999999997, + "grad_norm": 0.6595519957236287, + "learning_rate": 1.5000000000000002e-05, + "loss": 0.6327, + "step": 28 + }, + { + "epoch": 2.784, + "grad_norm": 0.5810919433489632, + "learning_rate": 1.4562106573531632e-05, + "loss": 0.638, + "step": 29 + }, + { + "epoch": 2.88, + "grad_norm": 0.5598203359351309, + "learning_rate": 1.4112871031306118e-05, + "loss": 0.6268, + "step": 30 + }, + { + "epoch": 2.976, + "grad_norm": 0.5081893908295881, + "learning_rate": 1.3653410243663953e-05, + "loss": 0.6262, + "step": 31 + }, + { + "epoch": 3.072, + "grad_norm": 0.46842003845293695, + "learning_rate": 1.3184866502516846e-05, + "loss": 0.5969, + "step": 32 + }, + { + "epoch": 3.168, + "grad_norm": 0.5411918223030173, + "learning_rate": 1.2708404681430054e-05, + "loss": 0.5954, + "step": 33 + }, + { + "epoch": 3.2640000000000002, + "grad_norm": 0.4186008205264484, + "learning_rate": 1.2225209339563144e-05, + "loss": 0.5805, + "step": 34 + }, + { + "epoch": 3.36, + "grad_norm": 0.5012713173626475, + "learning_rate": 1.1736481776669307e-05, + "loss": 0.5929, + "step": 35 + }, + { + "epoch": 3.456, + "grad_norm": 0.42142495249376466, + "learning_rate": 1.1243437046474854e-05, + "loss": 0.5865, + "step": 36 + }, + { + "epoch": 3.552, + "grad_norm": 0.4755105712649461, + "learning_rate": 1.0747300935864245e-05, + "loss": 0.5861, + "step": 37 + }, + { + "epoch": 3.648, + "grad_norm": 0.46101341025428494, + "learning_rate": 1.0249306917380731e-05, + "loss": 0.5732, + "step": 38 + }, + { + "epoch": 3.7439999999999998, + "grad_norm": 0.43246687184135535, + "learning_rate": 9.750693082619274e-06, + "loss": 0.5584, + "step": 39 + }, + { + "epoch": 3.84, + 
"grad_norm": 0.39254399972143406, + "learning_rate": 9.252699064135759e-06, + "loss": 0.5548, + "step": 40 + }, + { + "epoch": 3.936, + "grad_norm": 0.3861973176797019, + "learning_rate": 8.756562953525151e-06, + "loss": 0.5655, + "step": 41 + }, + { + "epoch": 4.032, + "grad_norm": 0.42374098534776666, + "learning_rate": 8.263518223330698e-06, + "loss": 0.5633, + "step": 42 + }, + { + "epoch": 4.128, + "grad_norm": 0.4538854225702699, + "learning_rate": 7.774790660436857e-06, + "loss": 0.5378, + "step": 43 + }, + { + "epoch": 4.224, + "grad_norm": 0.3781458261925399, + "learning_rate": 7.291595318569951e-06, + "loss": 0.543, + "step": 44 + }, + { + "epoch": 4.32, + "grad_norm": 0.41511928586481533, + "learning_rate": 6.815133497483157e-06, + "loss": 0.5347, + "step": 45 + }, + { + "epoch": 4.416, + "grad_norm": 0.402421179277013, + "learning_rate": 6.34658975633605e-06, + "loss": 0.5447, + "step": 46 + }, + { + "epoch": 4.5120000000000005, + "grad_norm": 0.3556682564046602, + "learning_rate": 5.887128968693887e-06, + "loss": 0.5287, + "step": 47 + }, + { + "epoch": 4.608, + "grad_norm": 0.4086586929836952, + "learning_rate": 5.43789342646837e-06, + "loss": 0.5415, + "step": 48 + }, + { + "epoch": 4.704, + "grad_norm": 0.37123732327538245, + "learning_rate": 5.000000000000003e-06, + "loss": 0.5299, + "step": 49 + }, + { + "epoch": 4.8, + "grad_norm": 0.33105088139680716, + "learning_rate": 4.5745373613424075e-06, + "loss": 0.54, + "step": 50 + }, + { + "epoch": 4.896, + "grad_norm": 0.34036827210692955, + "learning_rate": 4.162563277652104e-06, + "loss": 0.5145, + "step": 51 + }, + { + "epoch": 4.992, + "grad_norm": 0.37280888371104687, + "learning_rate": 3.7651019814126656e-06, + "loss": 0.522, + "step": 52 + }, + { + "epoch": 5.088, + "grad_norm": 0.33200357438780415, + "learning_rate": 3.3831416240314085e-06, + "loss": 0.5161, + "step": 53 + }, + { + "epoch": 5.184, + "grad_norm": 0.3468598712867419, + "learning_rate": 3.017631819139273e-06, + "loss": 0.5171, + "step": 54 + }, + { + "epoch": 5.28, + "grad_norm": 0.3155159225190127, + "learning_rate": 2.669481281701739e-06, + "loss": 0.497, + "step": 55 + }, + { + "epoch": 5.376, + "grad_norm": 0.2950796973774278, + "learning_rate": 2.339555568810221e-06, + "loss": 0.5036, + "step": 56 + }, + { + "epoch": 5.4719999999999995, + "grad_norm": 0.2771484456896418, + "learning_rate": 2.0286749277707783e-06, + "loss": 0.5076, + "step": 57 + }, + { + "epoch": 5.568, + "grad_norm": 0.30202891361186884, + "learning_rate": 1.7376122568400533e-06, + "loss": 0.5087, + "step": 58 + }, + { + "epoch": 5.664, + "grad_norm": 0.284433863235484, + "learning_rate": 1.467091183678444e-06, + "loss": 0.4892, + "step": 59 + }, + { + "epoch": 5.76, + "grad_norm": 0.28905567583309655, + "learning_rate": 1.2177842662977136e-06, + "loss": 0.5286, + "step": 60 + }, + { + "epoch": 5.856, + "grad_norm": 0.30113786550921257, + "learning_rate": 9.903113209758098e-07, + "loss": 0.5088, + "step": 61 + }, + { + "epoch": 5.952, + "grad_norm": 0.29406826587431034, + "learning_rate": 7.852378812959227e-07, + "loss": 0.4946, + "step": 62 + }, + { + "epoch": 6.048, + "grad_norm": 0.2620114991759815, + "learning_rate": 6.030737921409169e-07, + "loss": 0.4894, + "step": 63 + }, + { + "epoch": 6.144, + "grad_norm": 0.2536132887815071, + "learning_rate": 4.4427194213859216e-07, + "loss": 0.5078, + "step": 64 + }, + { + "epoch": 6.24, + "grad_norm": 0.2493726460319065, + "learning_rate": 3.0922713770922155e-07, + "loss": 0.4851, + "step": 65 + }, + { + "epoch": 6.336, + "grad_norm": 
0.24730699837443748, + "learning_rate": 1.9827512151456175e-07, + "loss": 0.5004, + "step": 66 + }, + { + "epoch": 6.432, + "grad_norm": 0.24267186596083232, + "learning_rate": 1.1169173774871478e-07, + "loss": 0.4937, + "step": 67 + }, + { + "epoch": 6.5280000000000005, + "grad_norm": 0.23960620066529348, + "learning_rate": 4.9692246345985905e-08, + "loss": 0.497, + "step": 68 + }, + { + "epoch": 6.624, + "grad_norm": 0.25488562503009676, + "learning_rate": 1.2430787810776556e-08, + "loss": 0.4973, + "step": 69 + }, + { + "epoch": 6.72, + "grad_norm": 0.24026471856709594, + "learning_rate": 0.0, + "loss": 0.4974, + "step": 70 + }, + { + "epoch": 6.72, + "step": 70, + "total_flos": 2.7617679424199066e+17, + "train_loss": 0.639291496362005, + "train_runtime": 11118.3141, + "train_samples_per_second": 0.63, + "train_steps_per_second": 0.006 + } + ], + "logging_steps": 1, + "max_steps": 70, + "num_input_tokens_seen": 0, + "num_train_epochs": 7, + "save_steps": 500, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": true + }, + "attributes": {} + } + }, + "total_flos": 2.7617679424199066e+17, + "train_batch_size": 1, + "trial_name": null, + "trial_params": null +} diff --git a/training_args.bin b/training_args.bin new file mode 100644 index 0000000..c61ea6f --- /dev/null +++ b/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:195555ecfd04b121c7ab1cab3101551ec5a9be3008bd2fe22b3fc72453948984 +size 7288 diff --git a/training_loss.png b/training_loss.png new file mode 100644 index 0000000..d8edb2d Binary files /dev/null and b/training_loss.png differ diff --git a/vocab.json b/vocab.json new file mode 100644 index 0000000..6c49fc6 --- /dev/null +++ b/vocab.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca10d7e9fb3ed18575dd1e277a2579c16d108e32f27439684afa0e10b1440910 +size 2776833
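
For readers who want to inspect these artifacts programmatically, here is a minimal sketch of how one might re-derive the loss curve stored in `training_loss.png` from the `trainer_state.json` added above. The local file path and the use of matplotlib are assumptions for illustration, not part of this repository's tooling:

```python
# Illustrative sketch (assumed local path and assumed matplotlib install):
# replot the training loss from trainer_state.json's log_history.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step entries carry a "loss" key; the final summary entry carries
# "train_loss" instead, so filtering on "loss" skips it.
points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
steps, losses = zip(*points)

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.savefig("training_loss_replot.png")
```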