---
license: apache-2.0
language:
- id
tags:
- mistral
- text-generation-inference
---
### mistral-indo-7b
[Mistral 7B v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) fine-tuned on an [Indonesian instruction dataset](https://huggingface.co/datasets/sarahlintang/Alpaca_indo_instruct).
### Prompt template:
```
### Human: {instruction} ### Assistant: {response}
```
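
For the instruction used in the usage example below, the rendered prompt that is actually sent to the model looks like this (the model then generates the answer after `### Assistant:`):

```
### Human: Sebutkan lima macam makanan khas Indonesia. ### Assistant:
```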
### Example of Usage
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

model_id = "sarahlintang/mistral-indo-7b"

# Load the fine-tuned model and its tokenizer.
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True).to("cuda")
tokenizer = AutoTokenizer.from_pretrained(model_id)


def create_instruction(instruction):
    # Wrap the raw instruction in the prompt template shown above.
    prompt = f"### Human: {instruction} ### Assistant: "
    return prompt


def generate(
    instruction,
    max_new_tokens=128,
    temperature=0.1,
    top_p=0.75,
    top_k=40,
    num_beams=4,
    **kwargs
):
    prompt = create_instruction(instruction)
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].to("cuda")
    attention_mask = inputs["attention_mask"].to("cuda")

    generation_config = GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        num_beams=num_beams,
        **kwargs,
    )

    with torch.no_grad():
        generation_output = model.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            generation_config=generation_config,
            return_dict_in_generate=True,
            output_scores=True,
            max_new_tokens=max_new_tokens,
            early_stopping=True,
        )

    s = generation_output.sequences[0]
    output = tokenizer.decode(s)
    # Return only the assistant's answer, dropping the echoed prompt.
    return output.split("### Assistant:")[1].strip()


instruction = "Sebutkan lima macam makanan khas Indonesia."
print(generate(instruction))
```
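
As a lighter-weight alternative, the same model can be queried through the `transformers` text-generation `pipeline`. This sketch is not part of the original card; the parameters roughly mirror the defaults of `generate()` above, except that sampling is used here instead of beam search.

```python
from transformers import pipeline

# Illustrative sketch only: load the model into a text-generation pipeline on GPU 0.
pipe = pipeline("text-generation", model="sarahlintang/mistral-indo-7b", device=0)

prompt = "### Human: Sebutkan lima macam makanan khas Indonesia. ### Assistant: "
result = pipe(prompt, max_new_tokens=128, do_sample=True, temperature=0.1, top_p=0.75, top_k=40)

# The pipeline returns the prompt plus the completion; keep only the answer.
print(result[0]["generated_text"].split("### Assistant:")[1].strip())
```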