Initialize the repository; model provided by the ModelHub XC community
Model: Karko/Proctora Source: Original Platform
36  .gitattributes  vendored  Normal file
@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
assets/tmpd2xdo_x4.png filter=lfs diff=lfs merge=lfs -text
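These .gitattributes rules route every matching path through Git LFS. A quick sanity-check sketch (an editor addition, assuming a local clone with `git` on PATH; the shard name queried is one of the files added in this commit):

```python
import subprocess

# Ask Git which clean/smudge filter applies to a path covered by .gitattributes.
# Expected output: "model-00001-of-00003.safetensors: filter: lfs"
result = subprocess.run(
    ["git", "check-attr", "filter", "--", "model-00001-of-00003.safetensors"],
    capture_output=True,
    text=True,
    check=True,
)
print(result.stdout.strip())
```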
29  README.md  Normal file
@@ -0,0 +1,29 @@
---
license: cc-by-nc-4.0
pipeline_tag: text-generation
tags:
- moe
- merge
---

![](assets/tmpd2xdo_x4.png)

Proctora is a MoE model made of:
- OpenPipe/mistral-ft-optimized-1227 as the base model
- SanjiWatsuki/Kunoichi-7B as the first expert, dedicated to RP tasks
- samir-fama/SamirGPT-v1 as the second expert, for factual answers

Being based on the Mixtral architecture, it has a native context length of 32K, which is great.

On the Open LLM Leaderboard it achieves a score of 71.88, which is interesting to some extent but does not really reflect the intended capabilities of the model.

This model was originally produced as a result of experiments with mergekit. Then, among my collection of LLMs, Proctora was selected to be the "grader" in an AI-RPG evaluation suite that I am currently building. Indeed, it produced the intended grades according to the given rubrics more often than other, "higher-performing" models on the leaderboard.

However, I also tested it in various RP scenarios using text-generation-webui (putting the character card in the system parameters and/or other world information), and I was quite impressed by the quality of its logic relative to other popular RP models. For example, it took special-power limitations into account better than other models, and it managed curse activations and weaknesses better than models roughly twice its size. Also, when acting as the player (with the user as the game master), Proctora was not only able to play in character but also sometimes made clever decisions to achieve its objectives.

Having the excellent SanjiWatsuki/Kunoichi-7B as an expert, the model is uncensored. Use with caution.

[Support Me Here!](https://ko-fi.com/karkomagor)

[My Blog](https://aitravelnotes.blogspot.com/)
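Not part of the original model card, but a minimal usage sketch for the checkpoint it describes, assuming the repo id Karko/Proctora given in the commit header; the prompt and generation settings are illustrative only:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Repo id taken from the commit header; adjust if the model is hosted elsewhere.
model_id = "Karko/Proctora"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
    device_map="auto",
)

prompt = "You are the game master. Describe the ruined temple the party has just entered."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```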
3  assets/tmpd2xdo_x4.png  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d129c97bf3cfc56572e6e6e400a7f979d454a4a25f4142ca975086384c2cd036
size 1322637
30  config.json  Normal file
@@ -0,0 +1,30 @@
{
  "_name_or_path": "OpenPipe/mistral-ft-optimized-1227",
  "architectures": [
    "MixtralForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "max_position_embeddings": 32768,
  "model_type": "mixtral",
  "num_attention_heads": 32,
  "num_experts_per_tok": 2,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "num_local_experts": 2,
  "output_router_logits": false,
  "rms_norm_eps": 1e-05,
  "rope_theta": 10000.0,
  "router_aux_loss_coef": 0.001,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.36.2",
  "use_cache": true,
  "vocab_size": 32000
}
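A short sanity-check sketch (assuming transformers is installed and the Karko/Proctora repo id from the commit header resolves): load the config above and confirm the two-expert Mixtral layout and the 32K context window mentioned in the README.

```python
from transformers import AutoConfig

# Fetch the config shown above; the repo id is taken from the commit header.
cfg = AutoConfig.from_pretrained("Karko/Proctora")

assert cfg.model_type == "mixtral"
assert cfg.num_local_experts == 2      # two merged experts
assert cfg.num_experts_per_tok == 2    # both experts are active for every token
print(cfg.max_position_embeddings)     # 32768 -> the 32K context length
```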
28  mergekit_moe_config.yml  Normal file
@@ -0,0 +1,28 @@
base_model: OpenPipe/mistral-ft-optimized-1227
experts:
  - source_model: SanjiWatsuki/Kunoichi-7B
    positive_prompts:
      - "write"
      - "describe"
      - "observe"
      - "go"
      - "try"
      - "attempt"
      - "say"
      - "take"
      - "fight"
      - "summarize"
      - "look"
  - source_model: samir-fama/SamirGPT-v1
    positive_prompts:
      - "what"
      - "where"
      - "why"
      - "who"
      - "when"
      - "which"
      - "how"
      - "count"
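A sketch of how a recipe like this is typically consumed. The CLI invocation is an assumption (mergekit's MoE entry point and flags vary between versions and are not confirmed by this commit); the parsing part only restates what the YAML above already says.

```python
import subprocess
import yaml

# Read the merge recipe: one Mistral base plus two experts, each gated by
# positive prompts (RP-style verbs vs. factual question words).
with open("mergekit_moe_config.yml") as f:
    recipe = yaml.safe_load(f)

print("base:", recipe["base_model"])
for expert in recipe["experts"]:
    print("expert:", expert["source_model"], "->", ", ".join(expert["positive_prompts"][:3]), "...")

# Assumption: mergekit is installed and exposes a `mergekit-moe` command that takes
# the config path and an output directory. Verify against your mergekit version.
subprocess.run(["mergekit-moe", "mergekit_moe_config.yml", "./proctora-merge"], check=True)
```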
3  model-00001-of-00003.safetensors  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8744fc4fa7bc3bdacd2743633074bea07cee12975179458e453f12d21a2b97fb
size 9919813704
3  model-00002-of-00003.safetensors  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e897b4382a0042d59a34e307ec41424ec77da6bfab022c86f4ff7e970494328b
size 9982454736
3  model-00003-of-00003.safetensors  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:88f29fc4a34e58c47a5f012687db0917cca200c8b1311b5a073d0c234034e89b
size 5856061008
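The three entries above are Git LFS pointer files: each records the SHA-256 digest and byte size of the real shard. A small integrity-check sketch (assuming the actual shards have been downloaded into the working directory; the names and hashes are those listed in this commit):

```python
import hashlib
from pathlib import Path

# oid/size values copied from the LFS pointer files above.
expected = {
    "model-00001-of-00003.safetensors": ("8744fc4fa7bc3bdacd2743633074bea07cee12975179458e453f12d21a2b97fb", 9919813704),
    "model-00002-of-00003.safetensors": ("e897b4382a0042d59a34e307ec41424ec77da6bfab022c86f4ff7e970494328b", 9982454736),
    "model-00003-of-00003.safetensors": ("88f29fc4a34e58c47a5f012687db0917cca200c8b1311b5a073d0c234034e89b", 5856061008),
}

for name, (oid, size) in expected.items():
    path = Path(name)
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    ok = digest.hexdigest() == oid and path.stat().st_size == size
    print(f"{name}: {'OK' if ok else 'MISMATCH'}")
```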
1  model.safetensors.index.json  Normal file
File diff suppressed because one or more lines are too long
24  special_tokens_map.json  Normal file
@@ -0,0 +1,24 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "<s>",
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
91129  tokenizer.json  Normal file
File diff suppressed because it is too large
BIN  tokenizer.model  (Stored with Git LFS)  Normal file
Binary file not shown.
42  tokenizer_config.json  Normal file
@@ -0,0 +1,42 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [],
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": true,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<s>",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}
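The tokenizer files describe a standard Llama/Mistral SentencePiece tokenizer that prepends BOS but does not append EOS. A quick verification sketch (again assuming the Karko/Proctora repo id from the commit header):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Karko/Proctora")

print(tok.bos_token, tok.eos_token, tok.unk_token)  # <s> </s> <unk>
print(tok.pad_token)                                # <s>, as set in special_tokens_map.json

# add_bos_token=true / add_eos_token=false: encoding starts with BOS and contains no EOS.
ids = tok("Hello world").input_ids
print(ids[0] == tok.bos_token_id, tok.eos_token_id not in ids)
```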