Initialize the project; model provided by the ModelHub XC community
Model: lodrick-the-lafted/Grafted-Hermetic-Platypus-D-2x7B Source: Original Platform
.gitattributes (vendored) Normal file, 35 lines
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md Normal file, 184 lines
@@ -0,0 +1,184 @@
---
license: apache-2.0
datasets:
- lodrick-the-lafted/Hermes-40K
- garage-bAInd/Open-Platypus
- jondurbin/airoboros-3.2
model-index:
- name: Grafted-Hermetic-Platypus-D-2x7B
  results:
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: AI2 Reasoning Challenge (25-Shot)
      type: ai2_arc
      config: ARC-Challenge
      split: test
      args:
        num_few_shot: 25
    metrics:
    - type: acc_norm
      value: 58.87
      name: normalized accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=lodrick-the-lafted/Grafted-Hermetic-Platypus-D-2x7B
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: HellaSwag (10-Shot)
      type: hellaswag
      split: validation
      args:
        num_few_shot: 10
    metrics:
    - type: acc_norm
      value: 82.89
      name: normalized accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=lodrick-the-lafted/Grafted-Hermetic-Platypus-D-2x7B
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MMLU (5-Shot)
      type: cais/mmlu
      config: all
      split: test
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 61.96
      name: accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=lodrick-the-lafted/Grafted-Hermetic-Platypus-D-2x7B
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: TruthfulQA (0-shot)
      type: truthful_qa
      config: multiple_choice
      split: validation
      args:
        num_few_shot: 0
    metrics:
    - type: mc2
      value: 61.02
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=lodrick-the-lafted/Grafted-Hermetic-Platypus-D-2x7B
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: Winogrande (5-shot)
      type: winogrande
      config: winogrande_xl
      split: validation
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 77.43
      name: accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=lodrick-the-lafted/Grafted-Hermetic-Platypus-D-2x7B
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: GSM8k (5-shot)
      type: gsm8k
      config: main
      split: test
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 43.29
      name: accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=lodrick-the-lafted/Grafted-Hermetic-Platypus-D-2x7B
      name: Open LLM Leaderboard
---

<img src="https://huggingface.co/lodrick-the-lafted/Grafted-Hermetic-Platypus-D-2x7B/resolve/main/ghp.png">

# Grafted-Hermetic-Platypus-D-2x7B

MoE merge of
- [Platyboros-Instruct-7B](https://huggingface.co/lodrick-the-lafted/Platyboros-Instruct-7B)
- [Hermes-Instruct-7B-v0.2](https://huggingface.co/lodrick-the-lafted/Hermes-Instruct-7B-v0.2)

Essentially the same merge as GHP-A.

<br />
<br />
# Prompt Format

Both the default Mistral-Instruct tags and the Alpaca format work, so use either:

```
<s>[INST] {sys_prompt} {instruction} [/INST]
```

or

```
{sys_prompt}

### Instruction:
{instruction}

### Response:

```

The tokenizer's bundled chat template defaults to Alpaca this time around.
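To see exactly what the bundled Alpaca template renders, you can ask the tokenizer for the formatted string without tokenizing it. A minimal sketch (the example messages are invented):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("lodrick-the-lafted/Grafted-Hermetic-Platypus-D-2x7B")

messages = [
    {"role": "system", "content": "You are a terse assistant."},
    {"role": "user", "content": "Name three uses for a platypus."},
]

# tokenize=False returns the formatted prompt string instead of token ids;
# add_generation_prompt=True appends the trailing "### Response:" block.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```

With no system message, the template falls back to the stock Alpaca preamble ("Below is an instruction that describes a task. ...").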

<br />
<br />

# Usage

```python
from transformers import AutoTokenizer
import transformers
import torch

model = "lodrick-the-lafted/Grafted-Hermetic-Platypus-D-2x7B"

tokenizer = AutoTokenizer.from_pretrained(model)

# Load as a standard text-generation pipeline in bfloat16.
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    model_kwargs={"torch_dtype": torch.bfloat16},
)

# Build the prompt with the bundled (Alpaca) chat template, then sample.
messages = [{"role": "user", "content": "Give me a cooking recipe for a pumpkin pie."}]
prompt = pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_p=0.95)
print(outputs[0]["generated_text"])
```

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_lodrick-the-lafted__Grafted-Hermetic-Platypus-D-2x7B).

| Metric                            | Value |
|-----------------------------------|------:|
| Avg.                              | 64.24 |
| AI2 Reasoning Challenge (25-Shot) | 58.87 |
| HellaSwag (10-Shot)               | 82.89 |
| MMLU (5-Shot)                     | 61.96 |
| TruthfulQA (0-shot)               | 61.02 |
| Winogrande (5-shot)               | 77.43 |
| GSM8k (5-shot)                    | 43.29 |
config.json Normal file, 30 lines
@@ -0,0 +1,30 @@
{
  "_name_or_path": "lodrick-the-lafted/Grafted-Hermetic-Platypus-D-2x7B",
  "architectures": [
    "MixtralForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "max_position_embeddings": 32768,
  "model_type": "mixtral",
  "num_attention_heads": 32,
  "num_experts_per_tok": 2,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "num_local_experts": 2,
  "output_router_logits": false,
  "rms_norm_eps": 1e-05,
  "rope_theta": 1000000.0,
  "router_aux_loss_coef": 0.001,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.39.0.dev0",
  "use_cache": false,
  "vocab_size": 32000
}
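The config above describes a Mixtral-style model with two local experts, both of which are routed to for every token. A quick way to confirm the shape without pulling the roughly 26 GB of weight shards is to load only the config; a minimal sketch using the repo id from `_name_or_path`:

```python
from transformers import AutoConfig

# Downloads and parses only config.json, not the safetensors shards.
cfg = AutoConfig.from_pretrained("lodrick-the-lafted/Grafted-Hermetic-Platypus-D-2x7B")

print(cfg.model_type)           # "mixtral"
print(cfg.num_local_experts)    # 2 -> one expert per grafted source model
print(cfg.num_experts_per_tok)  # 2 -> both experts are active for every token
```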
mergekit_moe_config.yml Normal file, 13 lines
@@ -0,0 +1,13 @@
base_model: lodrick-the-lafted/Platyboros-Instruct-7B
gate_mode: random
dtype: bfloat16

experts:
  - source_model: lodrick-the-lafted/Platyboros-Instruct-7B
    positive_prompts: ["math", "gsm8k", "algebra", "problem solving", "instruct", "geometry", "grade school math"]

  - source_model: lodrick-the-lafted/Hermes-Instruct-7B-v0.2
    positive_prompts: ["instruct", "roleplay", "coding", "general purpose"]
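For reference, this is the kind of config that mergekit's MoE entry point consumes; with `gate_mode: random` the router weights are initialized randomly, so, as I understand mergekit's gate modes, the `positive_prompts` lists do not influence the gates here. A minimal sketch of re-running the merge, assuming mergekit is installed and using placeholder paths:

```python
import subprocess

# Assumes `pip install mergekit` provides the `mergekit-moe` console script;
# the config filename and output directory below are placeholders.
subprocess.run(
    ["mergekit-moe", "mergekit_moe_config.yml", "./Grafted-Hermetic-Platypus-D-2x7B"],
    check=True,
)
```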
model-00001-of-00003.safetensors Normal file, 3 lines
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:edd7a2ed65208a21c0f5ea9e33c1b2de08718edde0712bc218014a112a0f7675
size 9986869088

model-00002-of-00003.safetensors Normal file, 3 lines
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c365330532dd3d968034146ed9429a404c597ab0fe7b755f1fa6eba9189e7fbf
size 9898781200

model-00003-of-00003.safetensors Normal file, 3 lines
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0f6db420adab50c338a44c96ec345f105eff4536007f293a1b093252924cc702
size 5872679152
model.safetensors.index.json Normal file, 1 line
File diff suppressed because one or more lines are too long
special_tokens_map.json Normal file, 24 lines
@@ -0,0 +1,24 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "<s>",
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json Normal file, 91122 lines
File diff suppressed because it is too large

tokenizer.model (Stored with Git LFS) Normal file
Binary file not shown.
tokenizer_config.json Normal file, 46 lines
@@ -0,0 +1,46 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "add_prefix_space": true,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [],
  "bos_token": "<s>",
  "chat_template": "{%- set ns = namespace(found=false) -%}{%- for message in messages -%}{%- if message['role'] == 'system' -%}{%- set ns.found = true -%}{%- endif -%}{%- endfor -%}{%- if not ns.found -%}{{- 'Below is an instruction that describes a task. Write a response that appropriately completes the request.' + '\n\n' -}}{%- endif %}{%- for message in messages %}{%- if message['role'] == 'system' -%}{{- '' + message['content'] + '\n\n' -}}{%- else -%}{%- if message['role'] == 'user' -%}{{-'### Instruction:\n' + message['content'] + '\n\n'-}}{%- else -%}{{-'### Response:\n' + message['content'] + '\n\n' -}}{%- endif -%}{%- endif -%}{%- endfor -%}{%- if add_generation_prompt -%}{{-'### Response:\n'-}}{%- endif -%}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": true,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<s>",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "trust_remote_code": false,
  "unk_token": "<unk>",
  "use_default_system_prompt": false,
  "use_fast": true
}
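Two settings above matter for batched generation: `pad_token` is mapped to `<s>` (the BOS token), and `add_bos_token` is true, so a leading `<s>` is prepended automatically. A small sanity check, as a sketch that assumes this repo id:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("lodrick-the-lafted/Grafted-Hermetic-Platypus-D-2x7B")

print(tok.bos_token, tok.eos_token, tok.unk_token)  # <s> </s> <unk>
print(tok.pad_token)      # <s>  (padding reuses BOS, per special_tokens_map.json)
print(tok.add_bos_token)  # True: encoding prepends <s> automatically

ids = tok("Hello platypus").input_ids
print(ids[0] == tok.bos_token_id)  # True
```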