Initialize the project; model provided by the ModelHub XC community
Model: shanchen/llama3-8B-slerp-biomed-chat-chinese | Source: Original Platform
.gitattributes vendored Normal file (+35 lines)
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
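These rules route every large binary artifact (model shards, checkpoints, archives, event logs) through Git LFS, so the repository itself stores only small pointer files. A rough way to check which files such a pattern list would capture (illustrative only; `fnmatch` approximates but does not exactly reproduce git's glob semantics):

```python
from fnmatch import fnmatch

# A subset of the patterns above, for illustration.
LFS_PATTERNS = ["*.safetensors", "*.bin", "*.gz", "saved_model/**/*", "*tfevents*"]

def is_lfs_tracked(path: str) -> bool:
    """True if the path matches any LFS-tracked pattern."""
    return any(fnmatch(path, pattern) for pattern in LFS_PATTERNS)

print(is_lfs_tracked("model-00001-of-00002.safetensors"))  # True
print(is_lfs_tracked("README.md"))                         # False
```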
README.md Normal file (+76 lines)
@@ -0,0 +1,76 @@
---
tags:
- merge
- mergekit
- lazymergekit
- shanchen/llama3-8B-slerp-med-chinese
- shenzhi-wang/Llama3-8B-Chinese-Chat
base_model:
- shanchen/llama3-8B-slerp-med-chinese
- shenzhi-wang/Llama3-8B-Chinese-Chat
license: llama3
language:
- zh
- en
---

# llama3-8B-slerp-biomed-chat-chinese

llama3-8B-slerp-biomed-chat-chinese is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
* [shanchen/llama3-8B-slerp-med-chinese](https://huggingface.co/shanchen/llama3-8B-slerp-med-chinese)
* [shenzhi-wang/Llama3-8B-Chinese-Chat](https://huggingface.co/shenzhi-wang/Llama3-8B-Chinese-Chat)

## 🧩 Configuration

```yaml
slices:
  - sources:
      - model: shanchen/llama3-8B-slerp-med-chinese
        layer_range: [0, 32]
      - model: shenzhi-wang/Llama3-8B-Chinese-Chat
        layer_range: [0, 32]
merge_method: slerp
base_model: shenzhi-wang/Llama3-8B-Chinese-Chat
parameters:
  t:
    - filter: self_attn
      value: [0.3, 0.5, 0.5, 0.7, 1]
    - filter: mlp
      value: [1, 0.7, 0.5, 0.5, 0.3]
    - value: 0.5
dtype: bfloat16
```
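The `slerp` method interpolates each pair of parent tensors along the arc between them rather than averaging linearly, which preserves weight magnitudes better when the parents point in different directions. The five-element `t` lists are spread across the 32 layers, so the attention and MLP blocks blend the two parents in different proportions at different depths; the bare `value: 0.5` is the fallback for everything else. A minimal NumPy sketch of the interpolation itself (simplified; the `slerp` helper here is illustrative, not mergekit's API):

```python
import numpy as np

def slerp(t: float, a: np.ndarray, b: np.ndarray, eps: float = 1e-8) -> np.ndarray:
    """Spherical linear interpolation between two weight tensors of the same shape."""
    a_flat, b_flat = a.ravel(), b.ravel()
    a_unit = a_flat / (np.linalg.norm(a_flat) + eps)
    b_unit = b_flat / (np.linalg.norm(b_flat) + eps)
    theta = np.arccos(np.clip(a_unit @ b_unit, -1.0, 1.0))  # angle between the tensors
    if theta < eps:  # nearly parallel: slerp degenerates to a lerp
        return (1 - t) * a + t * b
    w_a = np.sin((1 - t) * theta) / np.sin(theta)
    w_b = np.sin(t * theta) / np.sin(theta)
    return (w_a * a_flat + w_b * b_flat).reshape(a.shape)

# t = 0 returns the first tensor, t = 1 the second, 0.5 a balanced blend.
x, y = np.random.randn(64, 64), np.random.randn(64, 64)
merged = slerp(0.5, x, y)
```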
## 💻 Usage

```python
!pip install -qU transformers accelerate

from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "shanchen/llama3-8B-slerp-biomed-chat-chinese"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype="auto", device_map="auto"
)

messages = [
    {"role": "user", "content": "Can you speak Japanese?"},
]

input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(
    input_ids,
    max_new_tokens=8192,  # generation cap; prompt + output share the 8192-token context
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
)
response = outputs[0][input_ids.shape[-1]:]
print(tokenizer.decode(response, skip_special_tokens=True))
```
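For interactive use you may prefer to stream the reply as it is generated. A small variant of the call above (reusing `tokenizer`, `model`, and `input_ids` from the previous block) with transformers' `TextStreamer`:

```python
from transformers import TextStreamer

# Prints tokens to stdout as they are produced; skip_prompt avoids echoing the input.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
model.generate(
    input_ids,
    max_new_tokens=512,
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
    streamer=streamer,
)
```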
config.json Normal file (+28 lines)
@@ -0,0 +1,28 @@
{
  "_name_or_path": "shenzhi-wang/Llama3-8B-Chinese-Chat",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 128000,
  "eos_token_id": 128009,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "max_position_embeddings": 8192,
  "model_type": "llama",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 500000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.40.1",
  "use_cache": true,
  "vocab_size": 128256
}
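The config is standard Llama-3-8B geometry: 32 layers, 4096 hidden size, grouped-query attention with 8 KV heads against 32 query heads, and an untied 128256-token vocabulary. A quick sketch that loads it and back-of-the-envelopes the parameter count (weights only; norm parameters ignored as negligible):

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("shanchen/llama3-8B-slerp-biomed-chat-chinese")

head_dim = cfg.hidden_size // cfg.num_attention_heads  # 4096 / 32 = 128
kv_dim = cfg.num_key_value_heads * head_dim            # GQA: 8 * 128 = 1024

embed = 2 * cfg.vocab_size * cfg.hidden_size                 # untied in/out embeddings
attn = cfg.hidden_size * (2 * cfg.hidden_size + 2 * kv_dim)  # q,o full-width; k,v reduced
mlp = 3 * cfg.hidden_size * cfg.intermediate_size            # gate, up, down projections
total = embed + cfg.num_hidden_layers * (attn + mlp)
print(f"~{total / 1e9:.1f}B parameters")                     # ≈ 8.0B
```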
mergekit_config.yml Normal file (+17 lines)
@@ -0,0 +1,17 @@

slices:
  - sources:
      - model: shanchen/llama3-8B-slerp-med-chinese
        layer_range: [0, 32]
      - model: shenzhi-wang/Llama3-8B-Chinese-Chat
        layer_range: [0, 32]
merge_method: slerp
base_model: shenzhi-wang/Llama3-8B-Chinese-Chat
parameters:
  t:
    - filter: self_attn
      value: [0.3, 0.5, 0.5, 0.7, 1]
    - filter: mlp
      value: [1, 0.7, 0.5, 0.5, 0.3]
    - value: 0.5
dtype: bfloat16
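mergekit_config.yml is the exact recipe from the README, shipped alongside the weights so the merge can be reproduced. A hedged sketch of rerunning it (assumes `pip install mergekit`; `mergekit-yaml` is mergekit's CLI entry point, and `./merged-model` is a hypothetical output directory):

```python
import subprocess

# Re-run the merge from the shipped recipe; downloads both parent models from the Hub.
subprocess.run(
    ["mergekit-yaml", "mergekit_config.yml", "./merged-model"],
    check=True,
)
```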
model-00001-of-00002.safetensors Normal file (+3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:142101c8f1c9998d3641f9618e27f843aae2bbbe8f1b910f270a48f295b77ec4
size 9953405736
model-00002-of-00002.safetensors Normal file (+3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f35cbc9d506ecd3fb11e23499a049026067839ceaf4882cbadf9d586bcff3df1
size 6107150624
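The two .safetensors entries are Git LFS pointer files, not the weights themselves: three lines giving the spec version, a sha256 object id, and the byte size. The sizes sum to about 16.06 GB, consistent with ~8B parameters in bfloat16 (2 bytes each). A minimal sketch for verifying a downloaded shard against its pointer (file paths are illustrative):

```python
import hashlib
import os

def parse_lfs_pointer(path: str) -> dict:
    """Parse a git-lfs pointer file into its oid and size fields."""
    fields = dict(
        line.split(" ", 1)
        for line in open(path, encoding="utf-8").read().splitlines()
        if line
    )
    return {"oid": fields["oid"].removeprefix("sha256:"), "size": int(fields["size"])}

def verify_shard(blob_path: str, pointer: dict) -> bool:
    """Check a downloaded blob's size and sha256 digest against the pointer."""
    if os.path.getsize(blob_path) != pointer["size"]:
        return False
    digest = hashlib.sha256()
    with open(blob_path, "rb") as f:
        while chunk := f.read(1 << 20):  # hash 1 MiB at a time
            digest.update(chunk)
    return digest.hexdigest() == pointer["oid"]

# e.g. verify_shard("model-00001-of-00002.safetensors", parse_lfs_pointer("pointer.txt"))
```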
model.safetensors.index.json Normal file (+1 line)
File diff suppressed because one or more lines are too long
special_tokens_map.json Normal file (+23 lines)
@@ -0,0 +1,23 @@
{
  "bos_token": {
    "content": "<|begin_of_text|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|eot_id|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|eot_id|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
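Note that both eos_token and pad_token map to `<|eot_id|>`, the Llama-3 end-of-turn marker, rather than `<|end_of_text|>`; that matches `eos_token_id: 128009` in config.json and is what makes generation stop at the end of an assistant turn. A quick check:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("shanchen/llama3-8B-slerp-biomed-chat-chinese")
print(tok.bos_token)                             # <|begin_of_text|>
print(tok.eos_token, tok.pad_token)              # <|eot_id|> <|eot_id|>
print(tok.convert_tokens_to_ids("<|eot_id|>"))   # 128009, matching config.json
```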
tokenizer.json Normal file (+410504 lines)
File diff suppressed because it is too large
tokenizer_config.json Normal file (+2063 lines)
File diff suppressed because it is too large