Upload folder using huggingface_hub
committed by system
parent f9bc5c9aaf
commit 4e19912972
README.md (normal file, 70 lines)
@@ -0,0 +1,70 @@
---
tags:
- merge
- mergekit
- lazymergekit
- mlabonne/ChimeraLlama-3-8B-v2
- nbeerbower/llama-3-stella-8B
- uygarkurt/llama-3-merged-linear
base_model:
- mlabonne/ChimeraLlama-3-8B-v2
- nbeerbower/llama-3-stella-8B
- uygarkurt/llama-3-merged-linear
---

# NeuralLLaMa-3-8b-DT-v0.1

NeuralLLaMa-3-8b-DT-v0.1 is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):

* [mlabonne/ChimeraLlama-3-8B-v2](https://huggingface.co/mlabonne/ChimeraLlama-3-8B-v2)
* [nbeerbower/llama-3-stella-8B](https://huggingface.co/nbeerbower/llama-3-stella-8B)
* [uygarkurt/llama-3-merged-linear](https://huggingface.co/uygarkurt/llama-3-merged-linear)

## 🧩 Configuration

```yaml
models:
  - model: NousResearch/Meta-Llama-3-8B
    # No parameters necessary for base model
  - model: mlabonne/ChimeraLlama-3-8B-v2
    parameters:
      density: 0.33
      weight: 0.2
  - model: nbeerbower/llama-3-stella-8B
    parameters:
      density: 0.44
      weight: 0.4
  - model: uygarkurt/llama-3-merged-linear
    parameters:
      density: 0.55
      weight: 0.4
merge_method: dare_ties
base_model: NousResearch/Meta-Llama-3-8B
parameters:
  int8_mask: true
dtype: float16
```
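The card stops at the configuration itself; as a minimal sketch (not part of the original upload), the YAML above can be handed to mergekit's `mergekit-yaml` runner. The flag names below reflect the public mergekit CLI and may differ between versions; `config.yaml` and the `merge` output directory are placeholder names.

```python
# Sketch: reproducing the DARE-TIES merge from the YAML above (notebook environment assumed).
!pip install -qU mergekit

# Runs the merge defined in config.yaml and writes the result to ./merge.
# --copy-tokenizer copies the base model's tokenizer into the output folder;
# --cuda runs the merge on GPU if one is available.
!mergekit-yaml config.yaml merge --copy-tokenizer --cuda
```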
## 💻 Usage

```python
!pip install -qU transformers accelerate

from transformers import AutoTokenizer
import transformers
import torch

model = "Kukedlc/NeuralLLaMa-3-8b-DT-v0.1"
messages = [{"role": "user", "content": "What is a large language model?"}]

tokenizer = AutoTokenizer.from_pretrained(model)
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```
config.json (normal file, 28 lines)
@@ -0,0 +1,28 @@
{
  "_name_or_path": "NousResearch/Meta-Llama-3-8B",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 128000,
  "eos_token_id": 128001,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "max_position_embeddings": 8192,
  "model_type": "llama",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 500000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.40.2",
  "use_cache": true,
  "vocab_size": 128256
}
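Not part of the upload, but as a quick sketch, the architecture values above (4096 hidden size, 32 layers, grouped-query attention with 8 key/value heads, 8192-token context) can be inspected without downloading the weights; the repo id below is the one used in the usage section.

```python
# Sketch: inspecting the merged model's configuration via transformers.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("Kukedlc/NeuralLLaMa-3-8b-DT-v0.1")

# Grouped-query attention: 32 query heads share 8 key/value heads (4 queries per KV head).
print(config.hidden_size)              # 4096
print(config.num_hidden_layers)        # 32
print(config.num_attention_heads)      # 32
print(config.num_key_value_heads)      # 8
print(config.max_position_embeddings)  # 8192
```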
mergekit_config.yml (normal file, 21 lines)
@@ -0,0 +1,21 @@
models:
  - model: NousResearch/Meta-Llama-3-8B
    # No parameters necessary for base model
  - model: mlabonne/ChimeraLlama-3-8B-v2
    parameters:
      density: 0.33
      weight: 0.2
  - model: nbeerbower/llama-3-stella-8B
    parameters:
      density: 0.44
      weight: 0.4
  - model: uygarkurt/llama-3-merged-linear
    parameters:
      density: 0.55
      weight: 0.4
merge_method: dare_ties
base_model: NousResearch/Meta-Llama-3-8B
parameters:
  int8_mask: true
dtype: float16
model-00001-of-00004.safetensors (normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9bb275a4858543a692f9a2b172841ad04cbb3711bfd35ebed11c43843e269311
size 4953586328
model-00002-of-00004.safetensors (normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7d8fde01a96c72d9e58bdd49a5eda0b8b944226f8585e929528211b574020ff4
size 4999819232
model-00003-of-00004.safetensors (normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e0dafd3634fdb31a06249ef2962c59ce6f240d1afb482fca873f7fc568276bff
size 4915916048
model-00004-of-00004.safetensors (normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e3d1605f48a1f34a071720a5d0dc47525d154a61e69b60a0d565cfa9233fed7e
size 1191234448
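The four entries above are git-lfs pointer files: the actual weight shards live in LFS and are identified by their SHA-256 (`oid`) and byte size. As a small sketch (not part of the upload), a downloaded shard can be checked against its pointer; the local file path below is a placeholder.

```python
# Sketch: verifying a downloaded shard against its git-lfs pointer (oid + size).
import hashlib
import os

path = "model-00001-of-00004.safetensors"  # placeholder: local path to the downloaded shard
expected_oid = "9bb275a4858543a692f9a2b172841ad04cbb3711bfd35ebed11c43843e269311"
expected_size = 4953586328

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("shard matches its LFS pointer")
```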
model.safetensors.index.json (normal file, 1 line)
File diff suppressed because one or more lines are too long
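Although its diff is suppressed, this index presumably follows the standard safetensors sharding format: a `metadata.total_size` field plus a `weight_map` from parameter name to shard file. A small sketch under that assumption, with the file downloaded locally:

```python
# Sketch: reading the shard index to see which file holds each tensor.
import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])        # total bytes across all shards
print(Counter(index["weight_map"].values()))  # number of tensors stored in each shard file
```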
special_tokens_map.json (normal file, 16 lines)
@@ -0,0 +1,16 @@
{
  "bos_token": {
    "content": "<|begin_of_text|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|end_of_text|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
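As a brief sketch (assuming the repo id from the usage section and the standard transformers tokenizer API), the special tokens declared above should line up with the ids in config.json (bos 128000, eos 128001):

```python
# Sketch: confirming the special tokens above match the ids declared in config.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Kukedlc/NeuralLLaMa-3-8b-DT-v0.1")

print(tokenizer.bos_token, tokenizer.bos_token_id)  # <|begin_of_text|> 128000
print(tokenizer.eos_token, tokenizer.eos_token_id)  # <|end_of_text|> 128001
```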
tokenizer.json (normal file, 410563 lines)
File diff suppressed because it is too large
tokenizer_config.json (normal file, 2061 lines)
File diff suppressed because it is too large