Initialize the project; model provided by the ModelHub XC community
Model: cloudyu/Mixtral_7Bx2_MoE_13B Source: Original Platform
README.md
---
license: cc-by-nc-4.0
tags:
- moe
---

# Mixtral MOE 2x7B

MoE merge of the following models, built with mergekit (a hypothetical config sketch follows the list):

* [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2)
* [NurtureAI/neural-chat-7b-v3-16k](https://huggingface.co/NurtureAI/neural-chat-7b-v3-16k)
* [jondurbin/bagel-dpo-7b-v0.1](https://huggingface.co/jondurbin/bagel-dpo-7b-v0.1)
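
The exact merge recipe is not published in this card. Purely as an illustration, a mergekit MoE config for a 2x7B merge of these models might look like the sketch below; the choice of base model, experts, and `positive_prompts` are assumptions, not the author's actual settings.

```
base_model: mistralai/Mistral-7B-Instruct-v0.2   # assumed base model
gate_mode: hidden    # route tokens by hidden-state similarity to the prompts below
dtype: bfloat16
experts:
  - source_model: NurtureAI/neural-chat-7b-v3-16k   # assumed expert
    positive_prompts:
      - "helpful long-context conversation"
  - source_model: jondurbin/bagel-dpo-7b-v0.1       # assumed expert
    positive_prompts:
      - "reasoning and instruction following"
```
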
The merged model works and generates coherent text.

GPU code example

```
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# v2 models
model_path = "cloudyu/Mixtral_7Bx2_MoE_13B"

tokenizer = AutoTokenizer.from_pretrained(model_path, use_default_system_prompt=False)
# load_in_4bit quantizes the weights via bitsandbytes so the MoE fits in GPU memory
model = AutoModelForCausalLM.from_pretrained(
    model_path, torch_dtype=torch.float32, device_map='auto', local_files_only=False, load_in_4bit=True
)
print(model)

prompt = input("please input prompt:")
while len(prompt) > 0:
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to("cuda")
    generation_output = model.generate(
        input_ids=input_ids, max_new_tokens=500, repetition_penalty=1.2
    )
    print(tokenizer.decode(generation_output[0]))
    prompt = input("please input prompt:")
```
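
Note: `load_in_4bit=True` requires the `bitsandbytes` package and a CUDA GPU. Newer transformers releases prefer an explicit quantization config; the variant below is a sketch of that style, and the compute dtype chosen here is an assumption rather than anything specified by this card.

```
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Explicit 4-bit quantization config (illustrative settings, not from the card)
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,  # assumed compute dtype for speed
)
model = AutoModelForCausalLM.from_pretrained(
    "cloudyu/Mixtral_7Bx2_MoE_13B",
    device_map="auto",
    quantization_config=quant_config,
)
```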

CPU code example

```
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# v2 models
model_path = "cloudyu/Mixtral_7Bx2_MoE_13B"

tokenizer = AutoTokenizer.from_pretrained(model_path, use_default_system_prompt=False)
# float32 on CPU needs about 4 bytes per parameter, so expect roughly 50 GB of RAM
model = AutoModelForCausalLM.from_pretrained(
    model_path, torch_dtype=torch.float32, device_map='cpu', local_files_only=False
)
print(model)

prompt = input("please input prompt:")
while len(prompt) > 0:
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    generation_output = model.generate(
        input_ids=input_ids, max_new_tokens=500, repetition_penalty=1.2
    )
    print(tokenizer.decode(generation_output[0]))
    prompt = input("please input prompt:")
```
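
Both loops above feed raw text to the model. Because the merged experts are instruction-tuned, prompts formatted with the tokenizer's chat template usually work better, if the repo ships one. A minimal sketch, reusing `tokenizer` and `model` from either example and assuming a chat template is defined:

```
messages = [{"role": "user", "content": "Explain mixture-of-experts in one paragraph."}]
# apply_chat_template wraps the message in the model's instruction format
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(input_ids=input_ids, max_new_tokens=500, repetition_penalty=1.2)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```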