Initialize project; model provided by the ModelHub XC community
Model: cygnisai/Cygnis-Alpha-1.7B-v0.1 Source: Original Platform
38 .gitattributes vendored Normal file
@@ -0,0 +1,38 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
onnx/model.onnx_data filter=lfs diff=lfs merge=lfs -text
onnx/model_fp16.onnx_data filter=lfs diff=lfs merge=lfs -text
Cygnis-Alpha-1.7B-v1.png filter=lfs diff=lfs merge=lfs -text
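
These rules route every matching file through the Git LFS filter, so large binaries are stored as lightweight pointers rather than raw blobs. A rough sketch in Python of checking a path against a few of these patterns (illustrative only: `fnmatch` treats `**` like `*`, so this approximates, not reproduces, full gitattributes matching):

```python
import fnmatch

# Illustrative subset of the patterns above, not the full list.
lfs_patterns = ["*.safetensors", "*.onnx", "*.bin", "*tfevents*", "saved_model/**/*"]

def is_lfs_tracked(path: str) -> bool:
    """Approximate check of whether a path matches an LFS-tracked pattern."""
    return any(fnmatch.fnmatch(path, pattern) for pattern in lfs_patterns)

print(is_lfs_tracked("model.safetensors"))  # True
print(is_lfs_tracked("config.json"))        # False
```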
3 Cygnis-Alpha-1.7B-v1.png Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:51f812c1bcbb111ce467bd703312eae26a2e66ded7a7a868a5398e1718c9c81f
size 966199
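
The diff above is not the image itself but a Git LFS pointer: three `key value` lines recording the spec version, a SHA-256 object ID, and the true size in bytes. The same three-line format recurs for every LFS-tracked file in this commit. A minimal sketch of reading one (the helper name is illustrative, not part of any library):

```python
def parse_lfs_pointer(path: str) -> dict[str, str]:
    """Parse a Git LFS pointer file into its version/oid/size fields."""
    fields: dict[str, str] = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# parse_lfs_pointer("Cygnis-Alpha-1.7B-v1.png")
# -> {'version': 'https://git-lfs.github.com/spec/v1',
#     'oid': 'sha256:51f812c1...', 'size': '966199'}
```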
136 README.md Normal file
@@ -0,0 +1,136 @@
---
library_name: transformers
license: apache-2.0
language:
- en
pipeline_tag: text-generation
base_model: HuggingFaceTB/SmolLM2-1.7B
tags:
- finetuned
- sft
- smollm2
- sovereign-ai
- safetensors
- onnx
- transformers.js
---

# Cygnis Alpha 1

<div align="center" style="background:#06090f; border-radius:14px; border:1px solid #0f1e30; overflow:hidden; margin-bottom:20px;">
  <img src="https://huggingface.co/cygnisai/Cygnis-Alpha-1.7B-v0.1/resolve/main/Cygnis-Alpha-1.7B-v1.png" width="100%" style="display:block;">
</div>

## Table of Contents

1. [Model Summary](#model-summary)
2. [Evaluation](#evaluation)
3. [Examples](#examples)
4. [Limitations](#limitations)
5. [Training](#training)
6. [License](#license)
7. [Citation](#citation)
## Model Summary

**Cygnis Alpha v1** is a compact, high-performance language model based on the **SmolLM2-1.7B** architecture. It is designed to be lightweight enough to run on-device while maintaining strong reasoning and instruction-following capabilities.

The 1.7B variant represents a significant leap over previous generations, trained on **11 trillion tokens** drawn from a high-quality mix of FineWeb-Edu, DCLM, and specialized coding/math datasets. The model was refined with Supervised Fine-Tuning (SFT) and Direct Preference Optimization (DPO) to improve logical consistency and helpfulness.

### How to use

#### Transformers

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "cygnisai/Cygnis-Alpha-1.7B-v0.1"
device = "cuda"  # for GPU usage or "cpu" for CPU usage

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)

messages = [{"role": "user", "content": "What is the core philosophy of sovereign AI?"}]
input_text = tokenizer.apply_chat_template(messages, tokenize=False)
inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
outputs = model.generate(inputs, max_new_tokens=100, temperature=0.2, top_p=0.9, do_sample=True)

print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
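
The snippet above loads the weights in full fp32. Since `config.json` in this repository declares `"torch_dtype": "bfloat16"`, loading in the native dtype roughly halves memory; a sketch, assuming bf16-capable hardware (`device_map="auto"` additionally requires the `accelerate` package):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "cygnisai/Cygnis-Alpha-1.7B-v0.1"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# Keep the weights in bfloat16 (the precision they were trained in)
# instead of upcasting to fp32 at load time.
model = AutoModelForCausalLM.from_pretrained(
    checkpoint,
    torch_dtype=torch.bfloat16,
    device_map="auto",  # requires `pip install accelerate`
)
```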

#### Transformers.js

```javascript
import { pipeline } from "@huggingface/transformers";

const generator = await pipeline(
  "text-generation",
  "cygnisai/Cygnis-Alpha-1.7B-v0.1",
);

const messages = [
  { role: "system", content: "You are Cygnis, a sovereign AI assistant." },
  { role: "user", content: "Hello!" },
];

const output = await generator(messages, { max_new_tokens: 128 });
console.log(output[0].generated_text.at(-1).content);
```

---

## Evaluation

Cygnis Alpha v1 inherits the benchmark performance of its SmolLM2-1.7B-Instruct base. Evaluations are zero-shot unless stated otherwise, run with [lighteval](https://github.com/huggingface/lighteval).

### Instruction Model Benchmarks

| Metric | Cygnis Alpha v1 (1.7B) | Llama-3.2-1B-Instruct | Qwen2.5-1.5B-Instruct |
|:------------------------------|:---------------------:|:---------------------:|:----------------------:|
| **IFEval** (avg prompt/inst) | **56.7** | 53.5 | 47.4 |
| **MT-Bench** | 6.13 | 5.48 | **6.52** |
| **HellaSwag** | **66.1** | 56.1 | 60.9 |
| **ARC (average)** | **51.7** | 41.6 | 46.2 |
| **GSM8K (5-shot)** | **48.2** | 26.8 | 42.8 |

---

## Examples

### Text Rewriting

**System Prompt:** "You are an AI writing assistant. Your task is to rewrite the user's email to make it more professional and approachable while maintaining its main points and key message."
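
A sketch of wiring that system prompt into the chat template, reusing the `tokenizer`, `model`, and `device` from the Transformers example above (the email text is an invented placeholder):

```python
messages = [
    {"role": "system", "content": "You are an AI writing assistant. Your task is to rewrite the user's email to make it more professional and approachable while maintaining its main points and key message."},
    {"role": "user", "content": "hey, need those figures by tomorrow or the deadline slips. send asap"},
]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(device)
outputs = model.generate(inputs, max_new_tokens=200, temperature=0.2, top_p=0.9, do_sample=True)
# Decode only the newly generated tokens, skipping the prompt.
print(tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True))
```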

### Function Calling

Cygnis Alpha v1 supports tool use and function calling, scoring **27%** on the BFCL leaderboard; it can interface with APIs to fetch real-time data or perform calculations. See `instructions_function_calling.md` in this repository for a worked example.

---

## Limitations

Cygnis Alpha v1 primarily understands and generates English. While capable for its size, it may produce factually inaccurate or logically inconsistent content on highly specialized tasks. Users should verify important information and treat the model as an assistive tool.

## Training

### Model Specifications
- **Architecture:** Transformer decoder
- **Pretraining tokens:** 11T
- **Precision:** bfloat16
- **Hardware:** 256 H100 GPUs (original base training)

### Software
- **Training Framework:** [nanotron](https://github.com/huggingface/nanotron/tree/main)
- **Alignment:** [alignment-handbook](https://github.com/huggingface/alignment-handbook/)

## License

This model is licensed under [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0).

## Citation

```bibtex
@misc{allal2025smollm2,
  title={SmolLM2: When Smol Goes Big -- Data-Centric Training of a Small Language Model},
  author={Allal, Loubna Ben and Lozhkov, Anton and Bakouch, Elie and others},
  year={2025},
  eprint={2502.02737},
  archivePrefix={arXiv},
}
```
22 all_results.json Normal file
@@ -0,0 +1,22 @@
{
    "epoch": 2.996074326092646,
    "eval_logits/chosen": -0.34099623560905457,
    "eval_logits/rejected": -0.3685227334499359,
    "eval_logps/chosen": -310.2510070800781,
    "eval_logps/rejected": -275.43145751953125,
    "eval_loss": 0.587827205657959,
    "eval_rewards/accuracies": 0.6746031641960144,
    "eval_rewards/chosen": 0.01673175022006035,
    "eval_rewards/margins": 0.5906793475151062,
    "eval_rewards/rejected": -0.573947548866272,
    "eval_runtime": 18.8462,
    "eval_samples": 2000,
    "eval_samples_per_second": 106.122,
    "eval_steps_per_second": 3.343,
    "total_flos": 0.0,
    "train_loss": 0.5334697115221363,
    "train_runtime": 7355.3343,
    "train_samples": 61134,
    "train_samples_per_second": 24.935,
    "train_steps_per_second": 0.195
}
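
These are DPO-style preference-training logs, and two consistency checks can be read straight off them: the reward margin is the chosen reward minus the rejected reward, and eval throughput is samples divided by runtime. A quick check in Python (values copied from the JSON above):

```python
chosen, rejected = 0.01673175022006035, -0.573947548866272
print(chosen - rejected)  # ~0.59068, matches eval_rewards/margins up to float rounding

print(2000 / 18.8462)     # ~106.12, matches eval_samples_per_second
```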
29 config.json Normal file
@@ -0,0 +1,29 @@
{
    "architectures": [
        "LlamaForCausalLM"
    ],
    "attention_bias": false,
    "attention_dropout": 0.0,
    "bos_token_id": 1,
    "eos_token_id": 2,
    "hidden_act": "silu",
    "hidden_size": 2048,
    "initializer_range": 0.02,
    "intermediate_size": 8192,
    "max_position_embeddings": 8192,
    "mlp_bias": false,
    "model_type": "llama",
    "num_attention_heads": 32,
    "num_hidden_layers": 24,
    "num_key_value_heads": 32,
    "pad_token_id": 2,
    "pretraining_tp": 1,
    "rms_norm_eps": 1e-05,
    "rope_scaling": null,
    "rope_theta": 130000,
    "tie_word_embeddings": true,
    "torch_dtype": "bfloat16",
    "transformers_version": "4.42.3",
    "use_cache": true,
    "vocab_size": 49152
}
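
As a sanity check on the "1.7B" in the model name, the parameter count follows from these fields for a standard Llama-style decoder: `num_key_value_heads` equals `num_attention_heads`, so attention is full multi-head, and `tie_word_embeddings` means the embedding matrix is counted once. A sketch of the arithmetic:

```python
h, layers, inter, vocab = 2048, 24, 8192, 49152  # from config.json

embed = vocab * h              # token embeddings, tied with the LM head
attn_per_layer = 4 * h * h     # q, k, v, o projections (full MHA, no biases)
mlp_per_layer = 3 * h * inter  # gate, up, down projections (SiLU MLP)
norms = layers * 2 * h + h     # two RMSNorms per layer plus the final norm

total = embed + layers * (attn_per_layer + mlp_per_layer) + norms
print(f"{total / 1e9:.2f}B parameters")  # ~1.71B
```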
16 eval_results.json Normal file
@@ -0,0 +1,16 @@
{
    "epoch": 2.996074326092646,
    "eval_logits/chosen": -0.34099623560905457,
    "eval_logits/rejected": -0.3685227334499359,
    "eval_logps/chosen": -310.2510070800781,
    "eval_logps/rejected": -275.43145751953125,
    "eval_loss": 0.587827205657959,
    "eval_rewards/accuracies": 0.6746031641960144,
    "eval_rewards/chosen": 0.01673175022006035,
    "eval_rewards/margins": 0.5906793475151062,
    "eval_rewards/rejected": -0.573947548866272,
    "eval_runtime": 18.8462,
    "eval_samples": 2000,
    "eval_samples_per_second": 106.122,
    "eval_steps_per_second": 3.343
}
7 generation_config.json Normal file
@@ -0,0 +1,7 @@
{
    "_from_model_config": true,
    "bos_token_id": 1,
    "eos_token_id": 2,
    "pad_token_id": 2,
    "transformers_version": "4.42.3"
}
183 instructions_function_calling.md Normal file
@@ -0,0 +1,183 @@
## Quick start

Instructions for function calling:

```python
import json
import re
from typing import Any, Optional

from jinja2 import Template
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.utils import get_json_schema


system_prompt = Template("""You are an expert in composing functions. You are given a question and a set of possible functions.
Based on the question, you will need to make one or more function/tool calls to achieve the purpose.
If none of the functions can be used, point it out and refuse to answer.
If the given question lacks the parameters required by the function, also point it out.

You have access to the following tools:
<tools>{{ tools }}</tools>

The output MUST strictly adhere to the following format, and NO other text MUST be included.
The example format is as follows. Please make sure the parameter type is correct. If no function call is needed, please make the tool calls an empty list '[]'.
<tool_call>[
{"name": "func_name1", "arguments": {"argument1": "value1", "argument2": "value2"}},
... (more tool calls as required)
]</tool_call>""")


def prepare_messages(
    query: str,
    tools: Optional[list[dict[str, Any]]] = None,
    history: Optional[list[dict[str, str]]] = None
) -> list[dict[str, str]]:
    """Prepare the system and user messages for the given query and tools.

    Args:
        query: The query to be answered.
        tools: The tools available to the model. Defaults to None, in which
            case an empty list is passed to the model.
        history: Previous exchange of messages, including the system prompt
            from the first query. Defaults to None for the first turn of a
            conversation.
    """
    if tools is None:
        tools = []
    if history:
        messages = history.copy()
        messages.append({"role": "user", "content": query})
    else:
        messages = [
            {"role": "system", "content": system_prompt.render(tools=json.dumps(tools))},
            {"role": "user", "content": query}
        ]
    return messages


def parse_response(text: str) -> list[dict[str, Any]] | str:
    """Parse a response from the model, returning either the list of
    parsed tool calls, or the raw model text (a thought or a direct
    answer) if no tool-call block could be extracted.

    Args:
        text: Response from the model.
    """
    pattern = r"<tool_call>(.*?)</tool_call>"
    matches = re.findall(pattern, text, re.DOTALL)
    if matches:
        return json.loads(matches[0])
    return text


model_name_smollm = "HuggingFaceTB/SmolLM2-1.7B-Instruct"
model = AutoModelForCausalLM.from_pretrained(model_name_smollm, device_map="auto", torch_dtype="auto", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_name_smollm)

from datetime import datetime
import random


def get_current_time() -> str:
    """Returns the current time in 24-hour format.

    Returns:
        str: Current time in HH:MM:SS format.
    """
    return datetime.now().strftime("%H:%M:%S")


def get_random_number_between(min: int, max: int) -> int:
    """
    Gets a random number between min and max.

    Args:
        min: The minimum number.
        max: The maximum number.

    Returns:
        A random number between min and max.
    """
    return random.randint(min, max)


tools = [get_json_schema(get_random_number_between), get_json_schema(get_current_time)]

toolbox = {"get_random_number_between": get_random_number_between, "get_current_time": get_current_time}

query = "Give me a number between 1 and 300"

messages = prepare_messages(query, tools=tools)

inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
result = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)

tool_calls = parse_response(result)
# [{'name': 'get_random_number_between', 'arguments': {'min': 1, 'max': 300}}]

# Get tool responses
tool_responses = [toolbox.get(tc["name"])(*tc["arguments"].values()) for tc in tool_calls]
# [63]

# For the second turn, rebuild the history of messages:
history = messages.copy()
# Add the "parsed response"
history.append({"role": "assistant", "content": result})
query = "Can you give me the hour?"
history.append({"role": "user", "content": query})

inputs = tokenizer.apply_chat_template(history, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
result = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)

tool_calls = parse_response(result)
tool_responses = [toolbox.get(tc["name"])(*tc["arguments"].values()) for tc in tool_calls]
# ['07:57:25']
```

#### Parallel function calls

Multiple tool calls can be required by a single query.

```python
query = "Can you give me the hour and a random number between 1 and 50?"

messages = prepare_messages(query, tools=tools)

inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
result = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)

tool_calls = parse_response(result)
tool_responses = [toolbox.get(tc["name"])(*tc["arguments"].values()) for tc in tool_calls]
# ['09:24:52', 50]

query = "Can you give me a random number between 1 and 10, another between 200 and 210, and another one between 55 and 60?"

messages = prepare_messages(query, tools=tools)

inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
result = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)

tool_calls = parse_response(result)
tool_responses = [toolbox.get(tc["name"])(*tc["arguments"].values()) for tc in tool_calls]
# [7, 202, 60]
```

#### Tools not available

```python
query = "Can you open a new page with youtube?"

messages = prepare_messages(query, tools=tools)

inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
result = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)

tool_calls = parse_response(result)
# []

# The response will be something similar to the following:
# "The query cannot be answered with the provided tools. Please make sure the tools are correctly installed and imported. If the tools are not installed, install them using pip: 'pip install -r tools.txt'. If the tools are already installed, ensure they are correctly configured. If the tools are not correctly configured, please contact the support team. The output MUST strictly adhere to the following format, and NO other text MUST be included.\n\n<tool_call>[]</tool_call>"
```
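
One caveat about `parse_response` as defined in the quick start: `json.loads` raises on a malformed `<tool_call>` payload, which small models occasionally emit. A hedged variant (the name is invented here) that falls back to returning the raw text:

```python
import json
import re

def parse_response_safe(text: str):
    """Like parse_response above, but returns the raw model text
    instead of raising when the <tool_call> payload is invalid JSON."""
    matches = re.findall(r"<tool_call>(.*?)</tool_call>", text, re.DOTALL)
    if matches:
        try:
            return json.loads(matches[0])
        except json.JSONDecodeError:
            return text
    return text
```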
48901 merges.txt Normal file
File diff suppressed because it is too large
3 model.safetensors Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f55217be716b6a997b97b9d8d7eb6fad02e00858f5010ec24f64603c3a98a0e8
size 3422777952
3 onnx/model.onnx Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c538daa78f811830dc9028aa228a63a218147ab478c0c65ef6e2d8cab532380a
size 165580
3 onnx/model.onnx_data Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:023686a59a534e45af70bc5f99ae70e592481701680591f9844fc140a3db220a
size 6847602688
3 onnx/model_bnb4.onnx Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ac7400ca170e6644d95d26394846d2cce8083298e8519b1df7ac1eaa0ab2b5b6
size 1311307655
3 onnx/model_fp16.onnx Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3d891d77661f6339f727b3188f1c21b1429a1e4e55c40d1f60f85048b00a4348
size 1326807956
3 onnx/model_fp16.onnx_data Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5f48c05c14ed97738f8dc5854c20c229ddc8661f43fa914085843901a4ba8740
size 2097152000
3 onnx/model_int8.onnx Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:db5cb9057f4e7014f00c38fd9764f0103e60bb4d145ad600b7144625f0d56930
size 1714119778
3 onnx/model_q4.onnx Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:467b7b8f62d99f184d3628d24b8d65c151e331695f6e9ea997616c4e279e9a51
size 1411969607
3 onnx/model_q4f16.onnx Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d94946187fb5f27579f3db4ba21fb7f7466c7cbd18956bd420d3981f75282f9c
size 1108730338
3 onnx/model_quantized.onnx Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b1110061268f161512c234c4d43cd9dbe61eeb1454e9b94b652edf4de55ae51c
size 1714119846
3 onnx/model_uint8.onnx Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b1110061268f161512c234c4d43cd9dbe61eeb1454e9b94b652edf4de55ae51c
size 1714119846
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e6bfce1916438dd2e6553aa0a62d418087b3ae04f8af75e714ad1f01b7663db6
size 114828
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b3d7723fd0715ce6dcbccf7bb2097f59490b0ac670f798f5378ef5abb7d1301d
size 828
34 special_tokens_map.json Normal file
@@ -0,0 +1,34 @@
{
    "additional_special_tokens": [
        "<|im_start|>",
        "<|im_end|>"
    ],
    "bos_token": {
        "content": "<|im_start|>",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "eos_token": {
        "content": "<|im_end|>",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "pad_token": {
        "content": "<|im_end|>",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "unk_token": {
        "content": "<|endoftext|>",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    }
}
98249 tokenizer.json Normal file
File diff suppressed because it is too large
154 tokenizer_config.json Normal file
@@ -0,0 +1,154 @@
{
    "add_prefix_space": false,
    "added_tokens_decoder": {
        "0": {
            "content": "<|endoftext|>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "1": {
            "content": "<|im_start|>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "2": {
            "content": "<|im_end|>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "3": {
            "content": "<repo_name>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "4": {
            "content": "<reponame>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "5": {
            "content": "<file_sep>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "6": {
            "content": "<filename>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "7": {
            "content": "<gh_stars>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "8": {
            "content": "<issue_start>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "9": {
            "content": "<issue_comment>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "10": {
            "content": "<issue_closed>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "11": {
            "content": "<jupyter_start>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "12": {
            "content": "<jupyter_text>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "13": {
            "content": "<jupyter_code>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "14": {
            "content": "<jupyter_output>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "15": {
            "content": "<jupyter_script>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "16": {
            "content": "<empty_output>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        }
    },
    "additional_special_tokens": [
        "<|im_start|>",
        "<|im_end|>"
    ],
    "bos_token": "<|im_start|>",
    "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful AI assistant named Cygnis, trained by CygnisAI<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
    "clean_up_tokenization_spaces": false,
    "eos_token": "<|im_end|>",
    "model_max_length": 8192,
    "pad_token": "<|im_end|>",
    "tokenizer_class": "GPT2Tokenizer",
    "unk_token": "<|endoftext|>",
    "vocab_size": 49152
}
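
The `chat_template` above is plain ChatML, with a default Cygnis system message injected whenever the conversation does not start with one. A sketch rendering it directly with jinja2 to inspect the exact string the model consumes (in practice `tokenizer.apply_chat_template` does this for you):

```python
from jinja2 import Template

# Template body copied from the chat_template field above.
chat_template = Template(
    "{% for message in messages %}"
    "{% if loop.first and messages[0]['role'] != 'system' %}"
    "{{ '<|im_start|>system\nYou are a helpful AI assistant named Cygnis, trained by CygnisAI<|im_end|>\n' }}"
    "{% endif %}"
    "{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
)

print(chat_template.render(
    messages=[{"role": "user", "content": "Hello!"}],
    add_generation_prompt=True,
))
# <|im_start|>system
# You are a helpful AI assistant named Cygnis, trained by CygnisAI<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```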
9 train_results.json Normal file
@@ -0,0 +1,9 @@
{
    "epoch": 2.996074326092646,
    "total_flos": 0.0,
    "train_loss": 0.5334697115221363,
    "train_runtime": 7355.3343,
    "train_samples": 61134,
    "train_samples_per_second": 24.935,
    "train_steps_per_second": 0.195
}
2426 trainer_state.json Normal file
File diff suppressed because it is too large
3 training_args.bin Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7649586c424c337f6c403fdb617ac9d954daf9a7192f3afe5b6318f37e9bb19e
size 6520
1 vocab.json Normal file
File diff suppressed because one or more lines are too long