Initialize the project; model provided by the ModelHub XC community

Model: chargoddard/llama-polyglot-13b
Source: Original Platform
Author: ModelHub XC
Date: 2026-05-10 14:40:24 +08:00
Commit: a21ba9d8b0
12 changed files with 93572 additions and 0 deletions

.gitattributes (vendored, new file, +35 lines)

@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text

README.md (new file, +29 lines)

@@ -0,0 +1,29 @@
---
license: llama2
tags:
- llama2
- merge
- mergekit
---
An experimental multilingual model built with a new merge technique (DARE-TIES).
Mergekit configuration (experimental branch):
```yaml
models:
  - model: clibrain/Llama-2-13b-ft-instruct-es
  - model: LeoLM/leo-hessianai-13b
  - model: daekeun-ml/Llama-2-ko-DPO-13B
  - model: pleisto/yuren-13b-chatml
  - model: bofenghuang/vigogne-2-13b-instruct
  - model: OpenBuddy/openbuddy-llama2-13b-v8.1-fp16
merge_method: dare_ties
base_model: TheBloke/Llama-2-13B-fp16
dtype: float16
parameters:
  density: 0.3
  weight: 1.0
  normalize: true
  int8_mask: true
tokenizer_source: base
```
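
For reference, the merged checkpoint should load like any other LLaMA-2 model. Below is a minimal sketch using the Hugging Face `transformers` API; the repo id is taken from the commit header, and `device_map="auto"` additionally requires the `accelerate` package:

```python
# A minimal inference sketch; assumes a GPU with enough memory for a
# 13B model in float16 (~26 GB), or accelerate-style sharding/offloading.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "chargoddard/llama-polyglot-13b"  # repo id from the commit header

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.float16,  # matches "torch_dtype": "float16" in config.json
    device_map="auto",          # shards/offloads across available devices
)

prompt = "La inteligencia artificial es"  # any of the merged languages should work
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```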

config.json (new file, +27 lines)

@@ -0,0 +1,27 @@
{
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 5120,
  "initializer_range": 0.02,
  "intermediate_size": 13824,
  "max_position_embeddings": 4096,
  "model_type": "llama",
  "num_attention_heads": 40,
  "num_hidden_layers": 40,
  "num_key_value_heads": 40,
  "pad_token_id": 0,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.35.2",
  "use_cache": true,
  "vocab_size": 32000
}
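
These fields describe the standard LLaMA-2-13B geometry. As a sanity check, an approximate parameter count can be computed directly from them; a back-of-the-envelope sketch that ignores the small RMSNorm weights (`attention_bias` is false, so there are no bias terms):

```python
# Approximate parameter count from the config.json fields above.
hidden = 5120    # hidden_size
inter = 13824    # intermediate_size
layers = 40      # num_hidden_layers
vocab = 32000    # vocab_size

attn = 4 * hidden * hidden   # q/k/v/o projections (no GQA: num_key_value_heads == num_attention_heads)
mlp = 3 * hidden * inter     # gate/up/down projections of the SiLU-gated MLP
embed = 2 * vocab * hidden   # input embeddings + lm_head ("tie_word_embeddings": false)

total = layers * (attn + mlp) + embed
print(f"~{total / 1e9:.2f}B parameters")  # ~13.02B, i.e. the "13b" in the name
```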

merge_config.yml (new file, +17 lines)

@@ -0,0 +1,17 @@
models:
  - model: clibrain/Llama-2-13b-ft-instruct-es
  - model: LeoLM/leo-hessianai-13b
  - model: daekeun-ml/Llama-2-ko-DPO-13B
  - model: pleisto/yuren-13b-chatml
  - model: bofenghuang/vigogne-2-13b-instruct
  - model: OpenBuddy/openbuddy-llama2-13b-v8.1-fp16
merge_method: dare_ties
base_model: TheBloke/Llama-2-13B-fp16
dtype: float16
parameters:
  density: 0.3
  weight: 1.0
  normalize: true
  int8_mask: true
tokenizer_source: base
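
To reproduce the merge, mergekit exposes both a CLI (`mergekit-yaml merge_config.yml <output-dir>`) and a Python API. A sketch using the Python API follows; the names track mergekit's documented example, but signatures may differ between versions, so treat this as an outline rather than a pinned recipe:

```python
# Outline of reproducing this merge with mergekit's Python API.
# Names follow mergekit's documented example; verify against your version.
import yaml
from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

with open("merge_config.yml", "r", encoding="utf-8") as f:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(f))

run_merge(
    merge_config,
    "./llama-polyglot-13b",     # output directory (illustrative path)
    options=MergeOptions(
        cuda=False,             # set True to run the merge on GPU
        copy_tokenizer=True,    # consistent with tokenizer_source: base
    ),
)
```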

(Git LFS pointer · file name not shown in this view)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8acfa198420b795b170ebc0fc5d47cecccc2a2355ded12bf0d80f20c399acdd6
size 9985398280

(Git LFS pointer · file name not shown in this view)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:376450c73582004cc89ac7ebe3fc8378b22d204e71c807eb1349517ef0e01a4f
size 9956562896

(Git LFS pointer · file name not shown in this view)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:068202a56691697972473b22785987403f50f81639383044d1a7165f01b7d5df
size 6089809376
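
Each block above is a Git LFS pointer, not the weights themselves; the three `size` fields sum to 26,031,770,552 bytes (~26 GB), consistent with ~13B parameters at 2 bytes each in float16. To resolve the pointers into real files, a sketch like the following with `huggingface_hub` should work, assuming the upstream repository is reachable:

```python
# Download the full repository, resolving LFS pointers to actual files.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(repo_id="chargoddard/llama-polyglot-13b")
print("Files downloaded to:", local_dir)
```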

(File name not shown · diff suppressed because one or more lines are too long)

special_tokens_map.json (new file, +23 lines)

@@ -0,0 +1,23 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
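
These are the three standard LLaMA-2 special tokens; their ids in tokenizer_config.json (0 for `<unk>`, 1 for `<s>`, 2 for `</s>`) line up with `bos_token_id` and `eos_token_id` in config.json. A quick sketch to confirm the mapping at runtime:

```python
# Confirm the special-token mapping at runtime.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("chargoddard/llama-polyglot-13b")
print(tok.bos_token, tok.bos_token_id)  # <s> 1
print(tok.eos_token, tok.eos_token_id)  # </s> 2
print(tok.unk_token, tok.unk_token_id)  # <unk> 0
```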

tokenizer.json (new file, +93391 lines)

File diff suppressed because it is too large

tokenizer.model (new file, +3 lines)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
size 499723

tokenizer_config.json (new file, +37 lines)

@@ -0,0 +1,37 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": null,
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}
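
Two details here are easy to miss: `model_max_length` is the LLaMA tokenizer's "effectively unbounded" sentinel value, so the practical context limit comes from `max_position_embeddings: 4096` in config.json, and `use_default_system_prompt` is false, so no system prompt is injected. A short sketch of the default encoding behavior (the BOS handling shown is the `LlamaTokenizer` default and is assumed, not verified against this checkpoint):

```python
# Default LlamaTokenizer behavior: a BOS token is prepended, no EOS appended.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("chargoddard/llama-polyglot-13b")
ids = tok("Hello world").input_ids
print(ids[0] == tok.bos_token_id)   # expected: True (<s> prepended)
print(ids[-1] == tok.eos_token_id)  # expected: False (no EOS by default)
print(tok.decode(ids))              # "<s> Hello world"
```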