初始化项目,由ModelHub XC社区提供模型
Model: LeroyDyer/Mixtral_Chat_7b Source: Original Platform
This commit is contained in:
36
.gitattributes
vendored
Normal file
36
.gitattributes
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
*.7z filter=lfs diff=lfs merge=lfs -text
|
||||
*.arrow filter=lfs diff=lfs merge=lfs -text
|
||||
*.bin filter=lfs diff=lfs merge=lfs -text
|
||||
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
||||
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
||||
*.ftz filter=lfs diff=lfs merge=lfs -text
|
||||
*.gz filter=lfs diff=lfs merge=lfs -text
|
||||
*.h5 filter=lfs diff=lfs merge=lfs -text
|
||||
*.joblib filter=lfs diff=lfs merge=lfs -text
|
||||
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
||||
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
||||
*.model filter=lfs diff=lfs merge=lfs -text
|
||||
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
||||
*.npy filter=lfs diff=lfs merge=lfs -text
|
||||
*.npz filter=lfs diff=lfs merge=lfs -text
|
||||
*.onnx filter=lfs diff=lfs merge=lfs -text
|
||||
*.ot filter=lfs diff=lfs merge=lfs -text
|
||||
*.parquet filter=lfs diff=lfs merge=lfs -text
|
||||
*.pb filter=lfs diff=lfs merge=lfs -text
|
||||
*.pickle filter=lfs diff=lfs merge=lfs -text
|
||||
*.pkl filter=lfs diff=lfs merge=lfs -text
|
||||
*.pt filter=lfs diff=lfs merge=lfs -text
|
||||
*.pth filter=lfs diff=lfs merge=lfs -text
|
||||
*.rar filter=lfs diff=lfs merge=lfs -text
|
||||
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
||||
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
||||
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
||||
*.tar filter=lfs diff=lfs merge=lfs -text
|
||||
*.tflite filter=lfs diff=lfs merge=lfs -text
|
||||
*.tgz filter=lfs diff=lfs merge=lfs -text
|
||||
*.wasm filter=lfs diff=lfs merge=lfs -text
|
||||
*.xz filter=lfs diff=lfs merge=lfs -text
|
||||
*.zip filter=lfs diff=lfs merge=lfs -text
|
||||
*.zst filter=lfs diff=lfs merge=lfs -text
|
||||
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
||||
mixtral_chat_7b.q8_0.gguf filter=lfs diff=lfs merge=lfs -text
|
||||
83
README.md
Normal file
83
README.md
Normal file
@@ -0,0 +1,83 @@
|
||||
---
|
||||
base_model:
|
||||
- mistralai/Mistral-7B-Instruct-v0.1
|
||||
library_name: transformers
|
||||
tags:
|
||||
- mergekit
|
||||
- merge
|
||||
license: mit
|
||||
language:
|
||||
- en
|
||||
metrics:
|
||||
- accuracy
|
||||
- bleu
|
||||
- code_eval
|
||||
- bleurt
|
||||
- brier_score
|
||||
pipeline_tag: text-generation
|
||||
---
|
||||
# Mixtral_Chat_7b
|
||||
|
||||
This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
|
||||
|
||||
## Merge Details
|
||||
### Merge Method
|
||||
|
||||
This model was merged using the [linear](https://arxiv.org/abs/2203.05482) merge method.
|
||||
|
||||
### Models Merged
|
||||
|
||||
The following models were included in the merge:
|
||||
|
||||
Locutusque/Hercules-3.1-Mistral-7B
|
||||
|
||||
mistralai/Mistral-7B-Instruct-v0.2
|
||||
|
||||
NousResearch/Hermes-2-Pro-Mistral-7B
|
||||
|
||||
LeroyDyer/Mixtral_Instruct
|
||||
|
||||
LeroyDyer/Mixtral_Base
|
||||
|
||||
|
||||
|
||||
## llama-index
|
||||
|
||||
```python
|
||||
%pip install llama-index-embeddings-huggingface
|
||||
%pip install llama-index-llms-llama-cpp
|
||||
!pip install llama-index
|
||||
|
||||
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
|
||||
from llama_index.llms.llama_cpp import LlamaCPP
|
||||
from llama_index.llms.llama_cpp.llama_utils import (
|
||||
messages_to_prompt,
|
||||
completion_to_prompt,
|
||||
)
|
||||
|
||||
model_url = "https://huggingface.co/LeroyDyer/Mixtral_Chat_7b/resolve/main/mixtral_chat_7b.q8_0.gguf"
|
||||
|
||||
llm = LlamaCPP(
|
||||
# You can pass in the URL to a GGML model to download it automatically
|
||||
model_url=model_url,
|
||||
# optionally, you can set the path to a pre-downloaded model instead of model_url
|
||||
model_path=None,
|
||||
temperature=0.1,
|
||||
max_new_tokens=256,
|
||||
# llama2 has a context window of 4096 tokens, but we set it lower to allow for some wiggle room
|
||||
context_window=3900,
|
||||
# kwargs to pass to __call__()
|
||||
generate_kwargs={},
|
||||
# kwargs to pass to __init__()
|
||||
# set to at least 1 to use GPU
|
||||
model_kwargs={"n_gpu_layers": 1},
|
||||
# transform inputs into Llama2 format
|
||||
messages_to_prompt=messages_to_prompt,
|
||||
completion_to_prompt=completion_to_prompt,
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
prompt = input("Enter your prompt: ")
|
||||
response = llm.complete(prompt)
|
||||
print(response.text)
|
||||
```
|
||||
26
config.json
Normal file
26
config.json
Normal file
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"_name_or_path": "LeroyDyer/Mixtral_Base_Chat_7b",
|
||||
"architectures": [
|
||||
"MistralForCausalLM"
|
||||
],
|
||||
"attention_dropout": 0.0,
|
||||
"bos_token_id": 1,
|
||||
"eos_token_id": 2,
|
||||
"hidden_act": "silu",
|
||||
"hidden_size": 4096,
|
||||
"initializer_range": 0.02,
|
||||
"intermediate_size": 14336,
|
||||
"max_position_embeddings": 32768,
|
||||
"model_type": "mistral",
|
||||
"num_attention_heads": 32,
|
||||
"num_hidden_layers": 32,
|
||||
"num_key_value_heads": 8,
|
||||
"rms_norm_eps": 1e-05,
|
||||
"rope_theta": 10000.0,
|
||||
"sliding_window": 4096,
|
||||
"tie_word_embeddings": false,
|
||||
"torch_dtype": "float16",
|
||||
"transformers_version": "4.38.2",
|
||||
"use_cache": true,
|
||||
"vocab_size": 32000
|
||||
}
|
||||
3
mixtral_chat_7b.q8_0.gguf
Normal file
3
mixtral_chat_7b.q8_0.gguf
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:b12f86a7006a4a05d8baed9ce10a41a8bfbb250e2bfb9510d65b6394b96c55c1
|
||||
size 7695857376
|
||||
30
special_tokens_map.json
Normal file
30
special_tokens_map.json
Normal file
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"bos_token": {
|
||||
"content": "<s>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false
|
||||
},
|
||||
"eos_token": {
|
||||
"content": "</s>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false
|
||||
},
|
||||
"pad_token": {
|
||||
"content": "</s>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false
|
||||
},
|
||||
"unk_token": {
|
||||
"content": "<unk>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false
|
||||
}
|
||||
}
|
||||
91136
tokenizer.json
Normal file
91136
tokenizer.json
Normal file
File diff suppressed because it is too large
Load Diff
BIN
tokenizer.model
(Stored with Git LFS)
Normal file
BIN
tokenizer.model
(Stored with Git LFS)
Normal file
Binary file not shown.
49
tokenizer_config.json
Normal file
49
tokenizer_config.json
Normal file
@@ -0,0 +1,49 @@
|
||||
{
|
||||
"add_bos_token": true,
|
||||
"add_eos_token": false,
|
||||
"added_tokens_decoder": {
|
||||
"0": {
|
||||
"content": "<unk>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"1": {
|
||||
"content": "<s>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"2": {
|
||||
"content": "</s>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
}
|
||||
},
|
||||
"additional_special_tokens": [],
|
||||
"bos_token": "<s>",
|
||||
"clean_up_tokenization_spaces": false,
|
||||
"eos_token": "</s>",
|
||||
"legacy": true,
|
||||
"max_length": 512,
|
||||
"model_max_length": 1000000000000000019884624838656,
|
||||
"pad_to_multiple_of": null,
|
||||
"pad_token": "</s>",
|
||||
"pad_token_type_id": 0,
|
||||
"padding_side": "left",
|
||||
"sp_model_kwargs": {},
|
||||
"spaces_between_special_tokens": false,
|
||||
"stride": 0,
|
||||
"tokenizer_class": "LlamaTokenizer",
|
||||
"truncation_side": "right",
|
||||
"truncation_strategy": "longest_first",
|
||||
"unk_token": "<unk>",
|
||||
"use_default_system_prompt": false
|
||||
}
|
||||
Reference in New Issue
Block a user