Initialize the project; model provided by the ModelHub XC community
Model: Ansarinoorie2001/Mini-kugal Source: Original Platform
37
.gitattributes
vendored
Normal file
@@ -0,0 +1,37 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
onnx/model.onnx_data filter=lfs diff=lfs merge=lfs -text
onnx/model_fp16.onnx_data filter=lfs diff=lfs merge=lfs -text
336
README.md
Normal file
@@ -0,0 +1,336 @@
---
library_name: transformers
license: apache-2.0
language:
- en
pipeline_tag: text-generation
tags:
- safetensors
- onnx
- transformers.js
base_model:
- HuggingFaceTB/SmolLM2-1.7B
---

# SmolLM2



## Table of Contents

1. [Model Summary](#model-summary)
2. [Evaluation](#evaluation)
3. [Examples](#examples)
4. [Limitations](#limitations)
5. [Training](#training)
6. [License](#license)
7. [Citation](#citation)

## Model Summary

SmolLM2 is a family of compact language models available in three sizes: 135M, 360M, and 1.7B parameters. They are capable of solving a wide range of tasks while being lightweight enough to run on-device. More details in our paper: https://arxiv.org/abs/2502.02737v1

The 1.7B variant demonstrates significant advances over its predecessor SmolLM1-1.7B, particularly in instruction following, knowledge, reasoning, and mathematics. It was trained on 11 trillion tokens using a diverse dataset combination: FineWeb-Edu, DCLM, and The Stack, along with new mathematics and coding datasets that we curated and will release soon. We developed the instruct version through supervised fine-tuning (SFT) using a combination of public datasets and our own curated datasets. We then applied Direct Preference Optimization (DPO) using [UltraFeedback](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized).

The instruct model additionally supports tasks such as text rewriting, summarization, and function calling thanks to datasets developed by [Argilla](https://huggingface.co/argilla) such as [Synth-APIGen-v0.1](https://huggingface.co/datasets/argilla/Synth-APIGen-v0.1).
You can find the SFT dataset here: https://huggingface.co/datasets/HuggingFaceTB/smoltalk.

For more details, refer to https://github.com/huggingface/smollm, where you will find the pre-training, post-training, evaluation, and local inference code.

### How to use

#### Transformers
```bash
pip install transformers
```

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
checkpoint = "HuggingFaceTB/SmolLM2-1.7B-Instruct"

device = "cuda"  # for GPU usage or "cpu" for CPU usage
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# for multiple GPUs install accelerate and do `model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto")`
model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)

messages = [{"role": "user", "content": "What is the capital of France?"}]
input_text = tokenizer.apply_chat_template(messages, tokenize=False)
inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
outputs = model.generate(inputs, max_new_tokens=50, temperature=0.2, top_p=0.9, do_sample=True)
print(tokenizer.decode(outputs[0]))
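
If you would rather see tokens as they are produced instead of waiting for the full completion, `transformers` also ships a `TextStreamer` that plugs into `generate`. A minimal sketch, reusing the `model`, `tokenizer`, `device`, and `messages` objects from the snippet above (`add_generation_prompt=True` appends the assistant turn header for you):

```python
from transformers import TextStreamer

# Same prompt as above, built in one step; the completion is printed token by token
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(device)
streamer = TextStreamer(tokenizer, skip_prompt=True)  # skip_prompt=True hides the echoed prompt
model.generate(inputs, streamer=streamer, max_new_tokens=50, temperature=0.2, top_p=0.9, do_sample=True)
```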

#### Chat in TRL
You can also use the TRL CLI to chat with the model from the terminal:
```bash
pip install trl
trl chat --model_name_or_path HuggingFaceTB/SmolLM2-1.7B-Instruct --device cpu
```

#### Transformers.js

```bash
npm i @huggingface/transformers
```

```js
import { pipeline } from "@huggingface/transformers";

// Create a text generation pipeline
const generator = await pipeline(
  "text-generation",
  "HuggingFaceTB/SmolLM2-1.7B-Instruct",
);

// Define the list of messages
const messages = [
  { role: "system", content: "You are a helpful assistant." },
  { role: "user", content: "Tell me a joke." },
];

// Generate a response
const output = await generator(messages, { max_new_tokens: 128 });
console.log(output[0].generated_text.at(-1).content);
// "Why don't scientists trust atoms?\n\nBecause they make up everything!"
```

## Evaluation

In this section, we report the evaluation results of SmolLM2. All evaluations are zero-shot unless stated otherwise, and we use [lighteval](https://github.com/huggingface/lighteval) to run them.

## Base Pre-Trained Model

| Metric           | SmolLM2-1.7B | Llama-1B | Qwen2.5-1.5B | SmolLM1-1.7B |
|------------------|--------------|----------|--------------|--------------|
| HellaSwag        | **68.7**     | 61.2     | 66.4         | 62.9         |
| ARC (Average)    | **60.5**     | 49.2     | 58.5         | 59.9         |
| PIQA             | **77.6**     | 74.8     | 76.1         | 76.0         |
| MMLU-Pro (MCF)   | **19.4**     | 11.7     | 13.7         | 10.8         |
| CommonsenseQA    | **43.6**     | 41.2     | 34.1         | 38.0         |
| TriviaQA         | **36.7**     | 28.1     | 20.9         | 22.5         |
| Winogrande       | **59.4**     | 57.8     | 59.3         | 54.7         |
| OpenBookQA       | 42.2         | 38.4     | 40.0         | **42.4**     |
| GSM8K (5-shot)   | 31.0         | 7.2      | **61.3**     | 5.5          |

## Instruction Model

| Metric                              | SmolLM2-1.7B-Instruct | Llama-1B-Instruct | Qwen2.5-1.5B-Instruct | SmolLM1-1.7B-Instruct |
|:------------------------------------|:---------------------:|:-----------------:|:---------------------:|:---------------------:|
| IFEval (Average prompt/inst)        | **56.7**              | 53.5              | 47.4                  | 23.1                  |
| MT-Bench                            | 6.13                  | 5.48              | **6.52**              | 4.33                  |
| OpenRewrite-Eval (micro_avg RougeL) | 44.9                  | 39.2              | **46.9**              | NaN                   |
| HellaSwag                           | **66.1**              | 56.1              | 60.9                  | 55.5                  |
| ARC (Average)                       | **51.7**              | 41.6              | 46.2                  | 43.7                  |
| PIQA                                | **74.4**              | 72.3              | 73.2                  | 71.6                  |
| MMLU-Pro (MCF)                      | 19.3                  | 12.7              | **24.2**              | 11.7                  |
| BBH (3-shot)                        | 32.2                  | 27.6              | **35.3**              | 25.7                  |
| GSM8K (5-shot)                      | **48.2**              | 26.8              | 42.8                  | 4.62                  |

## Examples

Below are some system and instruct prompts that work well for special tasks.

### Text rewriting

```python
system_prompt_rewrite = "You are an AI writing assistant. Your task is to rewrite the user's email to make it more professional and approachable while maintaining its main points and key message. Do not return any text other than the rewritten message."
user_prompt_rewrite = "Rewrite the message below to make it more friendly and approachable while maintaining its main points and key message. Do not add any new information or return any text other than the rewritten message\nThe message:"
messages = [{"role": "system", "content": system_prompt_rewrite}, {"role": "user", "content": f"{user_prompt_rewrite} The CI is failing after your last commit!"}]
input_text = tokenizer.apply_chat_template(messages, tokenize=False)
inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
outputs = model.generate(inputs, max_new_tokens=50, temperature=0.2, top_p=0.9, do_sample=True)
print(tokenizer.decode(outputs[0]))
```
```
Hey there! I noticed that the CI isn't passing after your latest commit. Could you take a look and let me know what's going on? Thanks so much for your help!
```
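
The example snippets in this section all repeat the same template/generate/decode steps. If you are trying several prompts, a small helper can factor that out; `generate_reply` below is a hypothetical convenience wrapper, not part of the original card, and assumes `model`, `tokenizer`, and `device` are set up as in the quickstart:

```python
def generate_reply(messages, max_new_tokens=50):
    """Apply the chat template, generate with the sampling settings used above, and decode."""
    input_text = tokenizer.apply_chat_template(messages, tokenize=False)
    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
    outputs = model.generate(inputs, max_new_tokens=max_new_tokens, temperature=0.2, top_p=0.9, do_sample=True)
    return tokenizer.decode(outputs[0])

# The text-rewriting example above then collapses to:
print(generate_reply([
    {"role": "system", "content": system_prompt_rewrite},
    {"role": "user", "content": f"{user_prompt_rewrite} The CI is failing after your last commit!"},
]))
```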

### Summarization

```python
system_prompt_summarize = "Provide a concise, objective summary of the input text in up to three sentences, focusing on key actions and intentions without using second or third person pronouns."
# INSERT_LONG_EMAIL is a placeholder: bind it to the email text you want summarized
messages = [{"role": "system", "content": system_prompt_summarize}, {"role": "user", "content": INSERT_LONG_EMAIL}]
input_text = tokenizer.apply_chat_template(messages, tokenize=False)
inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
outputs = model.generate(inputs, max_new_tokens=50, temperature=0.2, top_p=0.9, do_sample=True)
print(tokenizer.decode(outputs[0]))
```

### Function calling

SmolLM2-1.7B-Instruct can handle function calling; it scores 27% on the [BFCL Leaderboard](https://gorilla.cs.berkeley.edu/blogs/8_berkeley_function_calling_leaderboard.html). Here's how you can leverage it:

```python
import json
import re
from typing import Any, Optional

from jinja2 import Template
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.utils import get_json_schema


system_prompt = Template("""You are an expert in composing functions. You are given a question and a set of possible functions.
Based on the question, you will need to make one or more function/tool calls to achieve the purpose.
If none of the functions can be used, point it out and refuse to answer.
If the given question lacks the parameters required by the function, also point it out.

You have access to the following tools:
<tools>{{ tools }}</tools>

The output MUST strictly adhere to the following format, and NO other text MUST be included.
The example format is as follows. Please make sure the parameter type is correct. If no function call is needed, please make the tool calls an empty list '[]'.
<tool_call>[
{"name": "func_name1", "arguments": {"argument1": "value1", "argument2": "value2"}},
... (more tool calls as required)
]</tool_call>""")


def prepare_messages(
    query: str,
    tools: Optional[list[dict[str, Any]]] = None,
    history: Optional[list[dict[str, str]]] = None
) -> list[dict[str, str]]:
    """Prepare the system and user messages for the given query and tools.

    Args:
        query: The query to be answered.
        tools: The tools available to the model. Defaults to None, in which
            case an empty list is passed to the model.
        history: Previous exchange of messages, including the system prompt from
            the first query. Defaults to None, for the first message in a conversation.
    """
    if tools is None:
        tools = []
    if history:
        messages = history.copy()
        messages.append({"role": "user", "content": query})
    else:
        messages = [
            {"role": "system", "content": system_prompt.render(tools=json.dumps(tools))},
            {"role": "user", "content": query}
        ]
    return messages


def parse_response(text: str) -> str | list[dict[str, Any]]:
    """Parse a response from the model, returning either the list of
    parsed tool calls, or the raw model output (its reasoning or reply)
    if no tool call could be extracted.

    Args:
        text: Response from the model.
    """
    pattern = r"<tool_call>(.*?)</tool_call>"
    matches = re.findall(pattern, text, re.DOTALL)
    if matches:
        return json.loads(matches[0])
    return text


model_name_smollm = "HuggingFaceTB/SmolLM2-1.7B-Instruct"
model = AutoModelForCausalLM.from_pretrained(model_name_smollm, device_map="auto", torch_dtype="auto", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_name_smollm)

from datetime import datetime
import random

def get_current_time() -> str:
    """Returns the current time in 24-hour format.

    Returns:
        str: Current time in HH:MM:SS format.
    """
    return datetime.now().strftime("%H:%M:%S")


def get_random_number_between(min: int, max: int) -> int:
    """
    Gets a random number between min and max.

    Args:
        min: The minimum number.
        max: The maximum number.

    Returns:
        A random number between min and max.
    """
    return random.randint(min, max)


tools = [get_json_schema(get_random_number_between), get_json_schema(get_current_time)]

toolbox = {"get_random_number_between": get_random_number_between, "get_current_time": get_current_time}

query = "Give me a number between 1 and 300"

messages = prepare_messages(query, tools=tools)

inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
result = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)

tool_calls = parse_response(result)
# [{'name': 'get_random_number_between', 'arguments': {'min': 1, 'max': 300}}]

# Get tool responses
tool_responses = [toolbox.get(tc["name"])(*tc["arguments"].values()) for tc in tool_calls]
# [63]

# For the second turn, rebuild the history of messages:
history = messages.copy()
# Add the "parsed response"
history.append({"role": "assistant", "content": result})
query = "Can you give me the hour?"
history.append({"role": "user", "content": query})

inputs = tokenizer.apply_chat_template(history, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
result = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)

tool_calls = parse_response(result)
tool_responses = [toolbox.get(tc["name"])(*tc["arguments"].values()) for tc in tool_calls]
# ['07:57:25']
```
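
Note that the dispatch line above passes the parsed arguments positionally (`*tc["arguments"].values()`), which quietly relies on the model emitting keys in the function's declared parameter order. A slightly more defensive variant, sketched here rather than taken from the card, passes them as keyword arguments and skips tool names the model may have invented:

```python
def run_tool_calls(tool_calls, toolbox):
    """Execute parsed tool calls by name, passing arguments as keywords."""
    responses = []
    for tc in tool_calls:
        func = toolbox.get(tc["name"])
        if func is None:
            # The model asked for a tool we never offered; record it instead of crashing
            responses.append(f"unknown tool: {tc['name']}")
            continue
        responses.append(func(**tc["arguments"]))
    return responses

tool_responses = run_tool_calls(tool_calls, toolbox)
```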

More details, such as parallel function calls and handling tools that are not available, can be found [here](https://huggingface.co/HuggingFaceTB/SmolLM2-1.7B-Instruct/blob/main/instructions_function_calling.md).

## Limitations

SmolLM2 models primarily understand and generate content in English. They can produce text on a variety of topics, but the generated content may not always be factually accurate, logically consistent, or free from biases present in the training data. These models should be used as assistive tools rather than definitive sources of information. Users should always verify important information and critically evaluate any generated content.

## Training

### Model

- **Architecture:** Transformer decoder
- **Pretraining tokens:** 11T
- **Precision:** bfloat16

### Hardware

- **GPUs:** 256 H100

### Software

- **Training Framework:** [nanotron](https://github.com/huggingface/nanotron/tree/main)
- **Alignment Handbook:** [alignment-handbook](https://github.com/huggingface/alignment-handbook/)

## License

[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)

## Citation

```bibtex
@misc{allal2025smollm2smolgoesbig,
      title={SmolLM2: When Smol Goes Big -- Data-Centric Training of a Small Language Model},
      author={Loubna Ben Allal and Anton Lozhkov and Elie Bakouch and Gabriel Martín Blázquez and Guilherme Penedo and Lewis Tunstall and Andrés Marafioti and Hynek Kydlíček and Agustín Piqueres Lajarín and Vaibhav Srivastav and Joshua Lochner and Caleb Fahlgren and Xuan-Son Nguyen and Clémentine Fourrier and Ben Burtenshaw and Hugo Larcher and Haojun Zhao and Cyril Zakka and Mathieu Morlon and Colin Raffel and Leandro von Werra and Thomas Wolf},
      year={2025},
      eprint={2502.02737},
      archivePrefix={arXiv},
      primaryClass={cs.CL},
      url={https://arxiv.org/abs/2502.02737},
}
```
22
all_results.json
Normal file
@@ -0,0 +1,22 @@
{
    "epoch": 2.996074326092646,
    "eval_logits/chosen": -0.34099623560905457,
    "eval_logits/rejected": -0.3685227334499359,
    "eval_logps/chosen": -310.2510070800781,
    "eval_logps/rejected": -275.43145751953125,
    "eval_loss": 0.587827205657959,
    "eval_rewards/accuracies": 0.6746031641960144,
    "eval_rewards/chosen": 0.01673175022006035,
    "eval_rewards/margins": 0.5906793475151062,
    "eval_rewards/rejected": -0.573947548866272,
    "eval_runtime": 18.8462,
    "eval_samples": 2000,
    "eval_samples_per_second": 106.122,
    "eval_steps_per_second": 3.343,
    "total_flos": 0.0,
    "train_loss": 0.5334697115221363,
    "train_runtime": 7355.3343,
    "train_samples": 61134,
    "train_samples_per_second": 24.935,
    "train_steps_per_second": 0.195
}
40
config.json
Normal file
@@ -0,0 +1,40 @@
{
    "architectures": [
        "LlamaForCausalLM"
    ],
    "attention_bias": false,
    "attention_dropout": 0.0,
    "bos_token_id": 1,
    "eos_token_id": 2,
    "hidden_act": "silu",
    "hidden_size": 2048,
    "initializer_range": 0.02,
    "intermediate_size": 8192,
    "max_position_embeddings": 8192,
    "mlp_bias": false,
    "model_type": "llama",
    "num_attention_heads": 32,
    "num_hidden_layers": 24,
    "num_key_value_heads": 32,
    "pad_token_id": 2,
    "pretraining_tp": 1,
    "rms_norm_eps": 1e-05,
    "rope_scaling": null,
    "rope_theta": 130000,
    "tie_word_embeddings": true,
    "torch_dtype": "bfloat16",
    "transformers_version": "4.42.3",
    "transformers.js_config": {
        "dtype": "q4",
        "kv_cache_dtype": {
            "q4f16": "float16",
            "fp16": "float16"
        },
        "use_external_data_format": {
            "model.onnx": true,
            "model_fp16.onnx": true
        }
    },
    "use_cache": true,
    "vocab_size": 49152
}
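
These values can also be read programmatically instead of from the raw JSON; a short sketch using `transformers.AutoConfig`, assuming the upstream checkpoint name used in the README:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("HuggingFaceTB/SmolLM2-1.7B-Instruct")
print(config.model_type)               # "llama"
print(config.hidden_size)              # 2048
print(config.num_hidden_layers)        # 24
print(config.max_position_embeddings)  # 8192
```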
16
eval_results.json
Normal file
@@ -0,0 +1,16 @@
{
    "epoch": 2.996074326092646,
    "eval_logits/chosen": -0.34099623560905457,
    "eval_logits/rejected": -0.3685227334499359,
    "eval_logps/chosen": -310.2510070800781,
    "eval_logps/rejected": -275.43145751953125,
    "eval_loss": 0.587827205657959,
    "eval_rewards/accuracies": 0.6746031641960144,
    "eval_rewards/chosen": 0.01673175022006035,
    "eval_rewards/margins": 0.5906793475151062,
    "eval_rewards/rejected": -0.573947548866272,
    "eval_runtime": 18.8462,
    "eval_samples": 2000,
    "eval_samples_per_second": 106.122,
    "eval_steps_per_second": 3.343
}
7
generation_config.json
Normal file
@@ -0,0 +1,7 @@
{
    "_from_model_config": true,
    "bos_token_id": 1,
    "eos_token_id": 2,
    "pad_token_id": 2,
    "transformers_version": "4.42.3"
}
183
instructions_function_calling.md
Normal file
@@ -0,0 +1,183 @@
## Quick start

Instructions for function calling:

```python
import json
import re
from typing import Any, Optional

from jinja2 import Template
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.utils import get_json_schema


system_prompt = Template("""You are an expert in composing functions. You are given a question and a set of possible functions.
Based on the question, you will need to make one or more function/tool calls to achieve the purpose.
If none of the functions can be used, point it out and refuse to answer.
If the given question lacks the parameters required by the function, also point it out.

You have access to the following tools:
<tools>{{ tools }}</tools>

The output MUST strictly adhere to the following format, and NO other text MUST be included.
The example format is as follows. Please make sure the parameter type is correct. If no function call is needed, please make the tool calls an empty list '[]'.
<tool_call>[
{"name": "func_name1", "arguments": {"argument1": "value1", "argument2": "value2"}},
... (more tool calls as required)
]</tool_call>""")


def prepare_messages(
    query: str,
    tools: Optional[list[dict[str, Any]]] = None,
    history: Optional[list[dict[str, str]]] = None
) -> list[dict[str, str]]:
    """Prepare the system and user messages for the given query and tools.

    Args:
        query: The query to be answered.
        tools: The tools available to the model. Defaults to None, in which
            case an empty list is passed to the model.
        history: Previous exchange of messages, including the system prompt from
            the first query. Defaults to None, for the first message in a conversation.
    """
    if tools is None:
        tools = []
    if history:
        messages = history.copy()
        messages.append({"role": "user", "content": query})
    else:
        messages = [
            {"role": "system", "content": system_prompt.render(tools=json.dumps(tools))},
            {"role": "user", "content": query}
        ]
    return messages


def parse_response(text: str) -> str | list[dict[str, Any]]:
    """Parse a response from the model, returning either the list of
    parsed tool calls, or the raw model output (its reasoning or reply)
    if no tool call could be extracted.

    Args:
        text: Response from the model.
    """
    pattern = r"<tool_call>(.*?)</tool_call>"
    matches = re.findall(pattern, text, re.DOTALL)
    if matches:
        return json.loads(matches[0])
    return text


model_name_smollm = "HuggingFaceTB/SmolLM2-1.7B-Instruct"
model = AutoModelForCausalLM.from_pretrained(model_name_smollm, device_map="auto", torch_dtype="auto", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_name_smollm)

from datetime import datetime
import random

def get_current_time() -> str:
    """Returns the current time in 24-hour format.

    Returns:
        str: Current time in HH:MM:SS format.
    """
    return datetime.now().strftime("%H:%M:%S")


def get_random_number_between(min: int, max: int) -> int:
    """
    Gets a random number between min and max.

    Args:
        min: The minimum number.
        max: The maximum number.

    Returns:
        A random number between min and max.
    """
    return random.randint(min, max)


tools = [get_json_schema(get_random_number_between), get_json_schema(get_current_time)]

toolbox = {"get_random_number_between": get_random_number_between, "get_current_time": get_current_time}

query = "Give me a number between 1 and 300"

messages = prepare_messages(query, tools=tools)

inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
result = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)

tool_calls = parse_response(result)
# [{'name': 'get_random_number_between', 'arguments': {'min': 1, 'max': 300}}]

# Get tool responses
tool_responses = [toolbox.get(tc["name"])(*tc["arguments"].values()) for tc in tool_calls]
# [63]

# For the second turn, rebuild the history of messages:
history = messages.copy()
# Add the "parsed response"
history.append({"role": "assistant", "content": result})
query = "Can you give me the hour?"
history.append({"role": "user", "content": query})

inputs = tokenizer.apply_chat_template(history, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
result = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)

tool_calls = parse_response(result)
tool_responses = [toolbox.get(tc["name"])(*tc["arguments"].values()) for tc in tool_calls]
# ['07:57:25']
```

#### Parallel function calls

Multiple tool calls can be required by the same query:

```python
query = "Can you give me the hour and a random number between 1 and 50?"

messages = prepare_messages(query, tools=tools)

inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
result = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)

tool_calls = parse_response(result)
tool_responses = [toolbox.get(tc["name"])(*tc["arguments"].values()) for tc in tool_calls]
# ['09:24:52', 50]

query = "Can you give me a random number between 1 and 10, other between 200 and 210 and another one between 55 and 60?"

messages = prepare_messages(query, tools=tools)

inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
result = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)

tool_calls = parse_response(result)
tool_responses = [toolbox.get(tc["name"])(*tc["arguments"].values()) for tc in tool_calls]
# [7, 202, 60]
```

#### Tools not available

```python
query = "Can you open a new page with youtube?"

messages = prepare_messages(query, tools=tools)

inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
result = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)

tool_calls = parse_response(result)
# []

# The response will be something similar to the following:
# "The query cannot be answered with the provided tools. Please make sure the tools are correctly installed and imported. If the tools are not installed, install them using pip: 'pip install -r tools.txt'. If the tools are already installed, ensure they are correctly configured. If the tools are not correctly configured, please contact the support team. The output MUST strictly adhere to the following format, and NO other text MUST be included.\n\n<tool_call>[]</tool_call>"
```
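
Because `parse_response` returns a list when `<tool_call>` tags are present and the raw text otherwise, a caller can branch on the result to tell these outcomes apart; a small sketch using the names defined in the quick start above:

```python
parsed = parse_response(result)
if isinstance(parsed, list) and parsed:
    # One or more tool calls: execute them as above
    tool_responses = [toolbox.get(tc["name"])(*tc["arguments"].values()) for tc in parsed]
elif isinstance(parsed, list):
    # The model explicitly emitted <tool_call>[]</tool_call>: no suitable tool
    print("No tool matched; the model's refusal text is in `result`.")
else:
    # No <tool_call> tags at all: treat the output as a plain chat reply
    print(parsed)
```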
48901
merges.txt
Normal file
File diff suppressed because it is too large
3
model.safetensors
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f55217be716b6a997b97b9d8d7eb6fad02e00858f5010ec24f64603c3a98a0e8
size 3422777952
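
The weight files in this commit are stored as Git LFS pointer files rather than the binaries themselves: three `key value` lines giving the pointer spec version, the SHA-256 of the actual object, and its size in bytes. A tiny parser sketch (a hypothetical helper, shown only to document the pointer format):

```python
def parse_lfs_pointer(text: str) -> dict:
    """Split a Git LFS pointer file into its version/oid/size fields."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    fields["size"] = int(fields["size"])  # size is the byte count of the real object
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:f55217be716b6a997b97b9d8d7eb6fad02e00858f5010ec24f64603c3a98a0e8
size 3422777952"""
print(parse_lfs_pointer(pointer))  # the safetensors blob is ~3.4 GB
```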
3
onnx/model.onnx
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c538daa78f811830dc9028aa228a63a218147ab478c0c65ef6e2d8cab532380a
size 165580
3
onnx/model.onnx_data
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:023686a59a534e45af70bc5f99ae70e592481701680591f9844fc140a3db220a
size 6847602688
3
onnx/model_bnb4.onnx
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ac7400ca170e6644d95d26394846d2cce8083298e8519b1df7ac1eaa0ab2b5b6
size 1311307655
3
onnx/model_fp16.onnx
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3d891d77661f6339f727b3188f1c21b1429a1e4e55c40d1f60f85048b00a4348
size 1326807956
3
onnx/model_fp16.onnx_data
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5f48c05c14ed97738f8dc5854c20c229ddc8661f43fa914085843901a4ba8740
size 2097152000
3
onnx/model_int8.onnx
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:db5cb9057f4e7014f00c38fd9764f0103e60bb4d145ad600b7144625f0d56930
size 1714119778
3
onnx/model_q4.onnx
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:467b7b8f62d99f184d3628d24b8d65c151e331695f6e9ea997616c4e279e9a51
size 1411969607
3
onnx/model_q4f16.onnx
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d94946187fb5f27579f3db4ba21fb7f7466c7cbd18956bd420d3981f75282f9c
size 1108730338
3
onnx/model_quantized.onnx
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b1110061268f161512c234c4d43cd9dbe61eeb1454e9b94b652edf4de55ae51c
size 1714119846
3
onnx/model_uint8.onnx
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b1110061268f161512c234c4d43cd9dbe61eeb1454e9b94b652edf4de55ae51c
size 1714119846
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e6bfce1916438dd2e6553aa0a62d418087b3ae04f8af75e714ad1f01b7663db6
size 114828
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b3d7723fd0715ce6dcbccf7bb2097f59490b0ac670f798f5378ef5abb7d1301d
size 828
34
special_tokens_map.json
Normal file
@@ -0,0 +1,34 @@
{
    "additional_special_tokens": [
        "<|im_start|>",
        "<|im_end|>"
    ],
    "bos_token": {
        "content": "<|im_start|>",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "eos_token": {
        "content": "<|im_end|>",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "pad_token": {
        "content": "<|im_end|>",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "unk_token": {
        "content": "<|endoftext|>",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    }
}
98249
tokenizer.json
Normal file
File diff suppressed because it is too large
154
tokenizer_config.json
Normal file
@@ -0,0 +1,154 @@
{
    "add_prefix_space": false,
    "added_tokens_decoder": {
        "0": {
            "content": "<|endoftext|>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "1": {
            "content": "<|im_start|>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "2": {
            "content": "<|im_end|>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "3": {
            "content": "<repo_name>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "4": {
            "content": "<reponame>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "5": {
            "content": "<file_sep>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "6": {
            "content": "<filename>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "7": {
            "content": "<gh_stars>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "8": {
            "content": "<issue_start>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "9": {
            "content": "<issue_comment>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "10": {
            "content": "<issue_closed>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "11": {
            "content": "<jupyter_start>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "12": {
            "content": "<jupyter_text>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "13": {
            "content": "<jupyter_code>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "14": {
            "content": "<jupyter_output>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "15": {
            "content": "<jupyter_script>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "16": {
            "content": "<empty_output>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        }
    },
    "additional_special_tokens": [
        "<|im_start|>",
        "<|im_end|>"
    ],
    "bos_token": "<|im_start|>",
    "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful AI assistant named SmolLM, trained by Hugging Face<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
    "clean_up_tokenization_spaces": false,
    "eos_token": "<|im_end|>",
    "model_max_length": 8192,
    "pad_token": "<|im_end|>",
    "tokenizer_class": "GPT2Tokenizer",
    "unk_token": "<|endoftext|>",
    "vocab_size": 49152
}
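
The `chat_template` field above is a Jinja template: it wraps every message in `<|im_start|>role ... <|im_end|>` markers, injects a default SmolLM system prompt when the first message is not a system message, and appends an assistant header when `add_generation_prompt` is set. A quick way to see the rendered prompt, assuming the upstream checkpoint from the README:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-1.7B-Instruct")
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Hi!"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
# <|im_start|>system
# You are a helpful AI assistant named SmolLM, trained by Hugging Face<|im_end|>
# <|im_start|>user
# Hi!<|im_end|>
# <|im_start|>assistant
```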
9
train_results.json
Normal file
@@ -0,0 +1,9 @@
{
    "epoch": 2.996074326092646,
    "total_flos": 0.0,
    "train_loss": 0.5334697115221363,
    "train_runtime": 7355.3343,
    "train_samples": 61134,
    "train_samples_per_second": 24.935,
    "train_steps_per_second": 0.195
}
2426
trainer_state.json
Normal file
File diff suppressed because it is too large
3
training_args.bin
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7649586c424c337f6c403fdb617ac9d954daf9a7192f3afe5b6318f37e9bb19e
size 6520
1
vocab.json
Normal file
File diff suppressed because one or more lines are too long