Initialize the project; model provided by the ModelHub XC community
Model: Kukedlc/NeuralShiva-7B-DT · Source: Original Platform
.gitattributes · 35 lines · Normal file (vendored)
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
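All of the weight and archive formats above are routed through Git LFS, so the repository itself stores only small pointer files. A minimal sketch of how these glob patterns behave, using Python's `fnmatch` (which only approximates gitattributes matching, e.g. it has no special `**` handling):

```python
# Rough illustration of the LFS patterns above; fnmatch approximates
# gitattributes globbing but does not implement git's full matching rules.
from fnmatch import fnmatch

LFS_PATTERNS = ["*.safetensors", "*.bin", "*.onnx", "*tfevents*"]  # subset of the list above

def is_lfs_tracked(path: str) -> bool:
    return any(fnmatch(path, pattern) for pattern in LFS_PATTERNS)

print(is_lfs_tracked("model-00001-of-00002.safetensors"))  # True
print(is_lfs_tracked("config.json"))                       # False
```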
README.md · 114 lines · Normal file
@@ -0,0 +1,114 @@
---
tags:
- merge
- mergekit
- lazymergekit
- automerger/YamShadow-7B
- mlabonne/AlphaMonarch-7B
- automerger/OgnoExperiment27-7B
- Kukedlc/Jupiter-k-7B-slerp
base_model:
- automerger/YamShadow-7B
- mlabonne/AlphaMonarch-7B
- automerger/OgnoExperiment27-7B
- Kukedlc/Jupiter-k-7B-slerp
license: apache-2.0
---

# NeuralShiva-7B-DT

![image/png](https://cdn-uploads.huggingface.co/production/uploads/64d71ab4089bc502ceb44d29/Jr1w3gltMzvFX9AT_tHAx.png)

NeuralShiva-7B-DT is a merge of the following models, built with [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):

* [automerger/YamShadow-7B](https://huggingface.co/automerger/YamShadow-7B)
* [mlabonne/AlphaMonarch-7B](https://huggingface.co/mlabonne/AlphaMonarch-7B)
* [automerger/OgnoExperiment27-7B](https://huggingface.co/automerger/OgnoExperiment27-7B)
* [Kukedlc/Jupiter-k-7B-slerp](https://huggingface.co/Kukedlc/Jupiter-k-7B-slerp)

The merge uses the DARE-TIES method on top of the base model [liminerity/M7-7b](https://huggingface.co/liminerity/M7-7b), as the configuration below shows.

## 🧬 Model Family

![image/png](https://raw.githubusercontent.com/kukedlc87/imagenes/main/arbol%20genealogico.png.png)

## 🧩 Configuration

```yaml
models:
  - model: liminerity/M7-7b
    # no parameters necessary for base model
  - model: automerger/YamShadow-7B
    parameters:
      weight: 0.3
      density: 0.5
  - model: mlabonne/AlphaMonarch-7B
    parameters:
      weight: 0.2
      density: 0.5
  - model: automerger/OgnoExperiment27-7B
    parameters:
      weight: 0.2
      density: 0.5
  - model: Kukedlc/Jupiter-k-7B-slerp
    parameters:
      weight: 0.3
      density: 0.5
merge_method: dare_ties
base_model: liminerity/M7-7b
parameters:
  int8_mask: true
  normalize: true
dtype: bfloat16
```
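To reproduce a merge like this outside the LazyMergekit notebook, the config above can be fed straight to mergekit's `mergekit-yaml` entry point. A hedged sketch follows: the flags mirror the LazyMergekit Colab, and `config.yaml` is assumed to contain the YAML block above.

```python
# Sketch: run the DARE-TIES merge defined above with mergekit.
# Assumes `pip install mergekit` and that config.yaml holds the config.
import subprocess

subprocess.run(
    [
        "mergekit-yaml", "config.yaml", "merge",
        "--copy-tokenizer",        # carry the base model's tokenizer into the output
        "--lazy-unpickle",         # stream tensors to keep peak RAM down
        "--out-shard-size", "1B",  # write ~1B-parameter output shards
    ],
    check=True,
)
```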

## 💻 Usage - Stream

```python
# Requirements
!pip install -qU transformers accelerate bitsandbytes

# Imports & settings
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
import warnings
import os

os.environ["TOKENIZERS_PARALLELISM"] = "false"
warnings.filterwarnings('ignore')

# Model & tokenizer (4-bit quantized; keep the model and inputs on the same device)
MODEL_NAME = "Kukedlc/NeuralShiva-7B-DT"
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, device_map='cuda:0', load_in_4bit=True)
tok = AutoTokenizer.from_pretrained(MODEL_NAME)

# Inference
prompt = "I want you to generate a theory that unites quantum mechanics with the theory of relativity and cosmic consciousness"
inputs = tok([prompt], return_tensors="pt").to('cuda:0')
streamer = TextStreamer(tok)

# Besides returning the usual output tensor, the streamer prints the generated text to stdout as it is produced.
_ = model.generate(**inputs, streamer=streamer, max_new_tokens=512, do_sample=True, num_beams=1, top_p=0.9, temperature=0.7)
```

## 💻 Usage - Classic

```python
!pip install -qU transformers bitsandbytes accelerate

from transformers import AutoTokenizer
import transformers
import torch

model = "Kukedlc/NeuralShiva-7B-DT"

tokenizer = AutoTokenizer.from_pretrained(model)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    model_kwargs={"torch_dtype": torch.float16, "load_in_4bit": True},
)

# Build a chat-formatted prompt, then sample a completion
messages = [{"role": "user", "content": "Explain what a Mixture of Experts is in less than 100 words."}]
prompt = pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```
config.json · 27 lines · Normal file
@@ -0,0 +1,27 @@
{
  "_name_or_path": "liminerity/M7-7b",
  "add_gates": false,
  "architectures": [
    "MistralForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "max_position_embeddings": 32768,
  "model_type": "mistral",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "rms_norm_eps": 1e-05,
  "rope_theta": 10000.0,
  "sliding_window": 4096,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.38.2",
  "use_cache": true,
  "vocab_size": 32000
}
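The shapes in this config pin down the architecture: 32 Mistral layers with grouped-query attention (32 query heads sharing 8 KV heads) and a gated SiLU MLP. A back-of-the-envelope check that these numbers add up to roughly 7B parameters (a sketch that ignores the small RMSNorm weights):

```python
# Estimate the parameter count from the config above (norm weights ignored).
hidden, inter, layers, vocab = 4096, 14336, 32, 32000
heads, kv_heads = 32, 8
head_dim = hidden // heads  # 128

attn = 2 * hidden * hidden                    # q_proj + o_proj
attn += 2 * hidden * kv_heads * head_dim      # k_proj + v_proj (grouped-query)
mlp = 3 * hidden * inter                      # gate_proj, up_proj, down_proj
embeddings = 2 * vocab * hidden               # input embeddings + LM head (untied)

total = layers * (attn + mlp) + embeddings
print(f"~{total / 1e9:.2f}B parameters")      # ~7.24B
```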
mergekit_config.yml · 27 lines · Normal file
@@ -0,0 +1,27 @@

models:
  - model: liminerity/M7-7b
    # no parameters necessary for base model
  - model: automerger/YamShadow-7B
    parameters:
      weight: 0.3
      density: 0.5
  - model: mlabonne/AlphaMonarch-7B
    parameters:
      weight: 0.2
      density: 0.5
  - model: automerger/OgnoExperiment27-7B
    parameters:
      weight: 0.2
      density: 0.5
  - model: Kukedlc/Jupiter-k-7B-slerp
    parameters:
      weight: 0.3
      density: 0.5
merge_method: dare_ties
base_model: liminerity/M7-7b

parameters:
  int8_mask: true
  normalize: true
dtype: bfloat16
model-00001-of-00002.safetensors · 3 lines · Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3425fb5091d8870f4b17fd1b1ce01e2a9c4a7bb4b6388a6cc0a3add72445306c
size 9825524456
model-00002-of-00002.safetensors · 3 lines · Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d53256504783b5a68f8c41f01a47aa7eb8fca40ae6dd5e24b8c6e95cbbc4e19d
size 4657973592
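These two entries are Git LFS pointers: the repository stores only the object's SHA-256 and byte size, while the actual weight shards live in LFS storage. A sketch for verifying a downloaded shard against its pointer, with the filename and digest taken from the first pointer above:

```python
# Verify a downloaded shard against its LFS pointer (sketch).
import hashlib

EXPECTED_OID = "3425fb5091d8870f4b17fd1b1ce01e2a9c4a7bb4b6388a6cc0a3add72445306c"

digest = hashlib.sha256()
with open("model-00001-of-00002.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read 1 MiB at a time
        digest.update(chunk)

assert digest.hexdigest() == EXPECTED_OID, "shard does not match its LFS pointer"
print("ok:", digest.hexdigest())
```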
model.safetensors.index.json · 1 line · Normal file
File diff suppressed because one or more lines are too long
special_tokens_map.json · 30 lines · Normal file
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
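Note that the pad token is mapped to `<unk>` rather than a dedicated padding token, which is common for Mistral-family checkpoints. A quick check (a sketch; it downloads the tokenizer) that the map above is what `AutoTokenizer` actually loads:

```python
# Sketch: confirm the special-token map above survives loading.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Kukedlc/NeuralShiva-7B-DT")
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)
# Expected: <s> </s> <unk> <unk>
```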
tokenizer.json · 91122 lines · Normal file
File diff suppressed because it is too large
tokenizer.model · BIN (stored with Git LFS) · Normal file
Binary file not shown.
tokenizer_config.json · 44 lines · Normal file
@@ -0,0 +1,44 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "add_prefix_space": true,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [],
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": true,
  "model_max_length": 32768,
  "pad_token": "<unk>",
  "padding_side": "left",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}
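`padding_side` is set to `left`, which is the right choice for batched generation with a decoder-only model: left padding keeps every prompt flush against its generated continuation, whereas right padding would insert pad tokens between them. A minimal sketch of batched generation that depends on this setting (4-bit loading assumed, as in the usage examples above):

```python
# Sketch: batched generation relying on padding_side="left" from the config above.
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "Kukedlc/NeuralShiva-7B-DT"
tok = AutoTokenizer.from_pretrained(MODEL_NAME)   # padding_side="left", pad_token="<unk>"
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, device_map="auto", load_in_4bit=True)

prompts = [
    "The capital of France is",
    "In one sentence, model merging with DARE-TIES works by",
]
batch = tok(prompts, return_tensors="pt", padding=True).to(model.device)
out = model.generate(**batch, max_new_tokens=40, do_sample=False)
for text in tok.batch_decode(out, skip_special_tokens=True):
    print(text)
```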