Initialize project; model provided by the ModelHub XC community

Model: AIDC-AI/Marco-LLM-AR-V3
Source: Original Platform
Commit d1ffada77a by ModelHub XC
Date: 2026-05-05 06:35:13 +08:00
14 changed files with 152091 additions and 0 deletions

.gitattributes (vendored, new file, +49 lines)

@@ -0,0 +1,49 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*.tfevents* filter=lfs diff=lfs merge=lfs -text
*.db* filter=lfs diff=lfs merge=lfs -text
*.ark* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.gguf* filter=lfs diff=lfs merge=lfs -text
*.ggml filter=lfs diff=lfs merge=lfs -text
*.llamafile* filter=lfs diff=lfs merge=lfs -text
*.pt2 filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text

README.md (new file, +50 lines)

@@ -0,0 +1,50 @@
---
language:
- ar
pipeline_tag: text-generation
tags:
- pretrained
license: apache-2.0
base_model:
- Qwen/Qwen2.5-7B
---
# Marco-LLM-AR-7B
## Introduction
Marco-LLM-AR is a series of enhanced language models specifically fine-tuned for common languages used in the Arab world, including Modern Standard Arabic and several dialects. This repository contains the 7B Marco-LLM-AR base language model.
Compared with state-of-the-art open-source language models, Marco-LLM-AR has undergone extensive continued pretraining on a dataset of approximately 70 billion tokens, enhancing its capabilities in the targeted languages while remaining competitive on general benchmarks.
For more details, please refer to our [Hugging Face page](https://huggingface.co/AIDC-AI/Marco-LLM-AR).
## Model Details
The Marco-LLM-AR series includes models of varying sizes, from 7B to 72B parameters, in both base and instruction-tuned (Instruct) variants. The models are based on the Transformer architecture with SwiGLU activation, attention QKV bias, and grouped-query attention. They also employ an improved tokenizer adapted to multiple Arabic dialects and written forms.
## Usage
Base language models are not recommended for direct text generation. Instead, apply post-training methods such as supervised fine-tuning (SFT), reinforcement learning from human feedback (RLHF), or continued pretraining to adapt the model to a specific use case; a minimal loading sketch follows below.
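For reference, here is a minimal loading sketch. It assumes the standard `transformers` AutoModel API and is intended only as a smoke test, since the base model is not tuned for direct generation:

```python
# Minimal sketch (not from the model card): load the base model and run a
# quick generation smoke test with Hugging Face transformers.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "AIDC-AI/Marco-LLM-AR-V3"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype="auto",  # config.json specifies bfloat16
    device_map="auto",   # requires accelerate; places shards across devices
)

inputs = tokenizer("مرحبا", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```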
## Citation
If you find our work helpful, please cite our paper.
```bibtex
@article{unique_identifier,
  title={Marco-LLM: Bridging Languages via Massive Multilingual Training for Cross-Lingual Enhancement},
  journal={arXiv preprint arXiv:2412.04003},
  year={2024},
  url={https://arxiv.org/abs/2412.04003}
}
```

config.json (new file, +28 lines)

@@ -0,0 +1,28 @@
{
"architectures": [
"Qwen2ForCausalLM"
],
"attention_dropout": 0.0,
"bos_token_id": 151643,
"eos_token_id": 151643,
"hidden_act": "silu",
"hidden_size": 3584,
"initializer_range": 0.02,
"intermediate_size": 18944,
"max_position_embeddings": 131072,
"max_window_layers": 28,
"model_type": "qwen2",
"num_attention_heads": 28,
"num_hidden_layers": 28,
"num_key_value_heads": 4,
"rms_norm_eps": 1e-06,
"rope_theta": 1000000.0,
"sliding_window": 131072,
"tie_word_embeddings": false,
"torch_dtype": "bfloat16",
"transformers_version": "4.40.1",
"use_cache": true,
"use_mrope": false,
"use_sliding_window": false,
"vocab_size": 152064
}
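The config describes a 28-layer Qwen2-style decoder with grouped-query attention: 28 query heads share 4 key/value heads. Below is an illustrative sanity check of the geometry this implies; it assumes a local copy of the file and is not part of the repo:

```python
# Illustrative: derive the attention geometry implied by config.json.
import json

with open("config.json") as f:
    cfg = json.load(f)

head_dim = cfg["hidden_size"] // cfg["num_attention_heads"]            # 3584 / 28 = 128
gqa_groups = cfg["num_attention_heads"] // cfg["num_key_value_heads"]  # 28 / 4 = 7
print(f"head_dim={head_dim}, query heads per KV head={gqa_groups}")

# KV cache per token in bf16: 2 bytes * 2 (K and V) * layers * kv_heads * head_dim
kv_bytes = 2 * 2 * cfg["num_hidden_layers"] * cfg["num_key_value_heads"] * head_dim
print(f"KV cache per token: {kv_bytes} bytes")  # 57344 bytes = 56 KiB
```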

configuration.json (new file, +1 line)

@@ -0,0 +1 @@
{"framework": "pytorch", "task": "text-generation", "allow_remote": true}

generation_config.json (new file, +7 lines)

@@ -0,0 +1,7 @@
{
"bos_token_id": 151643,
"do_sample": false,
"eos_token_id": 151643,
"max_new_tokens": 2048,
"transformers_version": "4.37.0"
}
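With `"do_sample": false`, the shipped defaults mean greedy decoding capped at 2048 new tokens. A hedged sketch of inspecting these defaults via the standard `GenerationConfig` API:

```python
# Illustrative: inspect the generation defaults shipped with the model.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("AIDC-AI/Marco-LLM-AR-V3")
print(gen_cfg.do_sample)       # False -> greedy decoding by default
print(gen_cfg.max_new_tokens)  # 2048
# Sampling can still be enabled per call without editing the file, e.g.:
# model.generate(**inputs, do_sample=True, temperature=0.7, top_p=0.9)
```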

merges.txt (new file, +151387 lines)
File diff suppressed because it is too large.

Model weight shard 1 of 4 (safetensors LFS pointer)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1e73c0a298cd57f73340c2f7d5a361a85cbb68cce1d940cbc39aa8e913d2e877
size 3358292568
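Each weight shard is committed as a three-line Git LFS pointer: the spec version, the sha256 of the actual blob, and its byte size. After download, a shard can be verified against its pointer; a minimal sketch (the shard file name is an assumption, since the commit view does not show it):

```python
# Illustrative: hash a downloaded shard and compare against the sha256
# recorded in its LFS pointer.
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

expected = "1e73c0a298cd57f73340c2f7d5a361a85cbb68cce1d940cbc39aa8e913d2e877"
actual = sha256_of("model-00001-of-00004.safetensors")  # file name assumed
print("OK" if actual == expected else "MISMATCH")
```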

Model weight shard 2 of 4 (safetensors LFS pointer)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:896a6663b49d71d7c4fcdafcb52b292defa772c6d81854388e1bb5f968133738
size 3916037176

Model weight shard 3 of 4 (safetensors LFS pointer)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f348aa135eac610d67fc050d4418177a18519e4dd7f5af4a6b2c00078169f065
size 4275765880

Model weight shard 4 of 4 (safetensors LFS pointer)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f6a4c4551ae2619812e0a8b1220578df8cf869ccbf7f8e6a0badfc60ab467388
size 3681176224

model.safetensors.index.json (new file, +346 lines)

@@ -0,0 +1,346 @@
{
"metadata": {
"total_size": 15231233024
},
"weight_map": {
"model.embed_tokens.weight": "model-00003-of-00004.safetensors",
"model.layers.0.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.0.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.0.self_attn.v_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.0.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.0.self_attn.o_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.0.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.0.input_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.0.post_attention_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.1.self_attn.q_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.1.self_attn.k_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.1.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.1.self_attn.v_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.1.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
"model.layers.1.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.1.mlp.gate_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.1.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.1.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.1.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.2.self_attn.q_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.2.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.2.self_attn.v_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.2.self_attn.v_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.2.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.2.mlp.gate_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.2.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.2.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.2.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.2.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.3.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.3.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.3.self_attn.k_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.3.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.3.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.3.mlp.gate_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.3.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.3.mlp.down_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.3.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.4.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.4.self_attn.q_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.4.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.4.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.4.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.4.self_attn.o_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.4.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.4.input_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.4.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.5.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.5.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
"model.layers.5.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.5.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.5.self_attn.o_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.5.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.5.mlp.up_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.5.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.5.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.5.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.6.self_attn.q_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.6.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.6.self_attn.k_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.6.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
"model.layers.6.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.6.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.6.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.6.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.6.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.6.post_attention_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.7.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.7.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.7.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.7.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
"model.layers.7.self_attn.v_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.7.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
"model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.7.mlp.gate_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.7.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.8.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.8.self_attn.k_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.8.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.8.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.8.self_attn.v_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.8.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.8.post_attention_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.9.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.9.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.9.self_attn.v_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.9.mlp.gate_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.9.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.9.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.9.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.10.self_attn.k_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.10.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.10.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.10.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.11.self_attn.q_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.11.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
"model.layers.11.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.11.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.11.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.11.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.input_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.12.self_attn.q_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.12.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.12.self_attn.k_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.12.self_attn.k_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.12.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.12.mlp.gate_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.12.mlp.up_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.12.mlp.down_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.12.post_attention_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.13.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.13.self_attn.q_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.13.self_attn.k_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.13.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.13.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.13.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.13.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.14.self_attn.q_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.14.self_attn.q_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.14.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.14.self_attn.k_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.14.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.14.self_attn.o_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.14.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.14.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.14.mlp.down_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.14.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.14.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.15.self_attn.k_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.15.self_attn.v_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.15.self_attn.v_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.15.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.15.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.15.mlp.up_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.15.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.15.post_attention_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.16.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.16.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.16.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
"model.layers.16.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.16.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.16.mlp.down_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.16.post_attention_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.17.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.17.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.17.self_attn.v_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.17.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.17.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.17.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.17.mlp.up_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.17.input_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.17.post_attention_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.18.self_attn.q_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.18.self_attn.k_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.18.self_attn.k_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.18.self_attn.v_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.18.self_attn.o_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.18.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.18.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.18.mlp.down_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.18.input_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.19.self_attn.q_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.19.self_attn.k_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
"model.layers.19.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.19.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.19.input_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.20.self_attn.q_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
"model.layers.20.self_attn.k_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
"model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.20.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.20.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.20.mlp.down_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.20.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.21.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
"model.layers.21.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.21.self_attn.k_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.21.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.21.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.21.post_attention_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.22.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.22.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
"model.layers.22.self_attn.k_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.22.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.22.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.22.self_attn.v_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.22.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.22.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.22.mlp.up_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.22.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.22.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.22.post_attention_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.23.self_attn.q_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.23.self_attn.k_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.23.self_attn.k_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.23.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.23.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.23.self_attn.o_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.23.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.23.mlp.up_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.23.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.23.post_attention_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.24.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.24.self_attn.q_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.24.self_attn.k_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.24.self_attn.v_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
"model.layers.24.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.24.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.24.mlp.up_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.24.mlp.down_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.24.input_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.24.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.25.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.25.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
"model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.25.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.25.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.25.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.25.mlp.down_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.25.input_layernorm.weight": "model-00000-of-00004.safetensors",
"model.layers.25.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.26.self_attn.q_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.26.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
"model.layers.26.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.26.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
"model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.26.mlp.gate_proj.weight": "model-00000-of-00004.safetensors",
"model.layers.26.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.26.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.26.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.27.self_attn.q_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.27.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.27.self_attn.k_proj.bias": "model-00000-of-00004.safetensors",
"model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
"model.layers.27.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.27.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.27.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.27.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.norm.weight": "model-00001-of-00004.safetensors",
"lm_head.weight": "model-00002-of-00004.safetensors"
}
}
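The index maps every tensor name to the shard that stores it, so a loader only has to open the shards it actually needs. A hedged sketch of querying it (a local copy of the file is assumed):

```python
# Illustrative: resolve which shard holds a given tensor, and count tensors
# per shard, using the safetensors weight-map index.
import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])           # 15231233024 bytes (~15.2 GB)
print(index["weight_map"]["model.norm.weight"])  # shard holding the final norm
print(Counter(index["weight_map"].values()))     # tensor count per shard
```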

tokenizer.json (new file, stored with Git LFS; binary content not shown)

tokenizer_config.json (new file, +207 lines)

@@ -0,0 +1,207 @@
{
"add_bos_token": false,
"add_prefix_space": false,
"added_tokens_decoder": {
"151643": {
"content": "<|endoftext|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151644": {
"content": "<|im_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151645": {
"content": "<|im_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151646": {
"content": "<|object_ref_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151647": {
"content": "<|object_ref_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151648": {
"content": "<|box_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151649": {
"content": "<|box_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151650": {
"content": "<|quad_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151651": {
"content": "<|quad_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151652": {
"content": "<|vision_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151653": {
"content": "<|vision_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151654": {
"content": "<|vision_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151655": {
"content": "<|image_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151656": {
"content": "<|video_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151657": {
"content": "<tool_call>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151658": {
"content": "</tool_call>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151659": {
"content": "<|fim_prefix|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151660": {
"content": "<|fim_middle|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151661": {
"content": "<|fim_suffix|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151662": {
"content": "<|fim_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151663": {
"content": "<|repo_name|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151664": {
"content": "<|file_sep|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
}
},
"additional_special_tokens": [
"<|im_start|>",
"<|im_end|>",
"<|object_ref_start|>",
"<|object_ref_end|>",
"<|box_start|>",
"<|box_end|>",
"<|quad_start|>",
"<|quad_end|>",
"<|vision_start|>",
"<|vision_end|>",
"<|vision_pad|>",
"<|image_pad|>",
"<|video_pad|>"
],
"bos_token": null,
"chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
"clean_up_tokenization_spaces": false,
"eos_token": "<|endoftext|>",
"errors": "replace",
"model_max_length": 131072,
"pad_token": "<|endoftext|>",
"split_special_tokens": false,
"tokenizer_class": "Qwen2Tokenizer",
"unk_token": null
}
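The `chat_template` field is a Jinja template implementing the ChatML format (with tool-calling support) that `apply_chat_template` renders. A hedged usage sketch, noting that this is a base model, so chat formatting mainly matters after instruction tuning:

```python
# Illustrative: render the ChatML template shipped in tokenizer_config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("AIDC-AI/Marco-LLM-AR-V3")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "مرحبا"},
]
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# -> <|im_start|>system ... <|im_end|> ... <|im_start|>assistant
```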

vocab.json (new file, +1 line)
File diff suppressed because one or more lines are too long.