初始化项目,由ModelHub XC社区提供模型

Model: TucanoBR/Tucano-1b1-Instruct
Source: Original Platform
This commit is contained in:
ModelHub XC
2026-05-01 23:37:32 +08:00
commit 5f6610ac10
25 changed files with 66701 additions and 0 deletions

35
.gitattributes vendored Normal file
View File

@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text

3
EVALS-1b1-instruct.csv Normal file
View File

@@ -0,0 +1,3 @@
model,average,calame_pt,lambada_pt,enem,bluex,oab_exams,assin2_rte,assin2_sts,faquad_nli,hatebr,hatespeech_pt,tweet_br,arc_pt,hellaswag_pt,truthfulqa
Tucano-1b1-sft,33.66,56.74,34.66,21.06,24.61,26.20,33.42,0.87,43.97,33.33,41.23,40.65,30.60,42.83,41.17
Tucano-1b1-dpo,35.44,56.55,35.53,21.06,26.01,26.47,67.78,8.88,43.97,31.28,41.23,22.03,30.77,43.50,41.14
1 model average calame_pt lambada_pt enem bluex oab_exams assin2_rte assin2_sts faquad_nli hatebr hatespeech_pt tweet_br arc_pt hellaswag_pt truthfulqa
2 Tucano-1b1-sft 33.66 56.74 34.66 21.06 24.61 26.20 33.42 0.87 43.97 33.33 41.23 40.65 30.60 42.83 41.17
3 Tucano-1b1-dpo 35.44 56.55 35.53 21.06 26.01 26.47 67.78 8.88 43.97 31.28 41.23 22.03 30.77 43.50 41.14

190
LICENSE Normal file
View File

@@ -0,0 +1,190 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright Nicholas Kluge Corrêa, Aniket Sen, Sophia Falk, and Shiza Fatimah
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

445
README.md Normal file
View File

@@ -0,0 +1,445 @@
---
language:
- pt
license: apache-2.0
library_name: transformers
tags:
- text-generation-inference
datasets:
- nicholasKluge/instruct-aira-dataset-v3
- cnmoro/GPT4-500k-Augmented-PTBR-Clean
- rhaymison/orca-math-portuguese-64k
- nicholasKluge/reward-aira-dataset
metrics:
- perplexity
pipeline_tag: text-generation
widget:
- text: "<instruction>Cite algumas bandas de rock brasileiras famosas.</instruction>"
example_title: Exemplo
- text: "<instruction>Invente uma história sobre um encanador com poderes mágicos.</instruction>"
example_title: Exemplo
- text: "<instruction>Qual cidade é a capital do estado do Rio Grande do Sul?</instruction>"
example_title: Exemplo
- text: "<instruction>Diga o nome de uma maravilha culinária característica da cozinha Portuguesa?</instruction>"
example_title: Exemplo
inference:
parameters:
repetition_penalty: 1.2
temperature: 0.1
top_k: 50
top_p: 1.0
max_new_tokens: 150
co2_eq_emissions:
emissions: 21890
source: CodeCarbon
training_type: pre-training
geographical_location: Germany
hardware_used: NVIDIA A100-SXM4-80GB
model-index:
- name: Tucano-1b1-Instruct
results:
- task:
type: text-generation
name: Text Generation
dataset:
name: CALAME-PT
type: NOVA-vision-language/calame-pt
split: all
args:
num_few_shot: 0
metrics:
- type: acc
value: 56.55
name: accuracy
source:
url: https://huggingface.co/datasets/NOVA-vision-language/calame-pt
name: Context-Aware LAnguage Modeling Evaluation for Portuguese
- task:
type: text-generation
name: Text Generation
dataset:
name: LAMBADA-PT
type: TucanoBR/lambada-pt
split: train
args:
num_few_shot: 0
metrics:
- type: acc
value: 35.53
name: accuracy
source:
url: https://huggingface.co/datasets/TucanoBR/lambada-pt
name: LAMBADA-PT
- task:
type: text-generation
name: Text Generation
dataset:
name: ENEM Challenge (No Images)
type: eduagarcia/enem_challenge
split: train
args:
num_few_shot: 3
metrics:
- type: acc
value: 21.06
name: accuracy
source:
url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard
name: Open Portuguese LLM Leaderboard
- task:
type: text-generation
name: Text Generation
dataset:
name: BLUEX (No Images)
type: eduagarcia-temp/BLUEX_without_images
split: train
args:
num_few_shot: 3
metrics:
- type: acc
value: 26.01
name: accuracy
source:
url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard
name: Open Portuguese LLM Leaderboard
- task:
type: text-generation
name: Text Generation
dataset:
name: OAB Exams
type: eduagarcia/oab_exams
split: train
args:
num_few_shot: 3
metrics:
- type: acc
value: 26.47
name: accuracy
source:
url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard
name: Open Portuguese LLM Leaderboard
- task:
type: text-generation
name: Text Generation
dataset:
name: Assin2 RTE
type: assin2
split: test
args:
num_few_shot: 15
metrics:
- type: f1_macro
value: 67.78
name: f1-macro
source:
url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard
name: Open Portuguese LLM Leaderboard
- task:
type: text-generation
name: Text Generation
dataset:
name: Assin2 STS
type: eduagarcia/portuguese_benchmark
split: test
args:
num_few_shot: 10
metrics:
- type: pearson
value: 8.88
name: pearson
source:
url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard
name: Open Portuguese LLM Leaderboard
- task:
type: text-generation
name: Text Generation
dataset:
name: FaQuAD NLI
type: ruanchaves/faquad-nli
split: test
args:
num_few_shot: 15
metrics:
- type: f1_macro
value: 43.97
name: f1-macro
source:
url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard
name: Open Portuguese LLM Leaderboard
- task:
type: text-generation
name: Text Generation
dataset:
name: HateBR Binary
type: ruanchaves/hatebr
split: test
args:
num_few_shot: 25
metrics:
- type: f1_macro
value: 31.28
name: f1-macro
source:
url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard
name: Open Portuguese LLM Leaderboard
- task:
type: text-generation
name: Text Generation
dataset:
name: PT Hate Speech Binary
type: hate_speech_portuguese
split: test
args:
num_few_shot: 25
metrics:
- type: f1_macro
value: 41.23
name: f1-macro
source:
url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard
name: Open Portuguese LLM Leaderboard
- task:
type: text-generation
name: Text Generation
dataset:
name: tweetSentBR
type: eduagarcia-temp/tweetsentbr
split: test
args:
num_few_shot: 25
metrics:
- type: f1_macro
value: 22.03
name: f1-macro
source:
url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard
name: Open Portuguese LLM Leaderboard
- task:
type: text-generation
name: Text Generation
dataset:
name: ARC-Challenge (PT)
type: arc_pt
args:
num_few_shot: 25
metrics:
- type: acc_norm
value: 30.77
name: normalized accuracy
source:
url: https://github.com/nlp-uoregon/mlmm-evaluation
name: Evaluation Framework for Multilingual Large Language Models
- task:
type: text-generation
name: Text Generation
dataset:
name: HellaSwag (PT)
type: hellaswag_pt
args:
num_few_shot: 10
metrics:
- type: acc_norm
value: 43.50
name: normalized accuracy
source:
url: https://github.com/nlp-uoregon/mlmm-evaluation
name: Evaluation Framework for Multilingual Large Language Models
- task:
type: text-generation
name: Text Generation
dataset:
name: TruthfulQA (PT)
type: truthfulqa_pt
args:
num_few_shot: 0
metrics:
- type: mc2
value: 41.14
name: mc2
source:
url: https://github.com/nlp-uoregon/mlmm-evaluation
name: Evaluation Framework for Multilingual Large Language Models
- task:
type: text-generation
name: Text Generation
dataset:
name: Alpaca-Eval (PT)
type: alpaca_eval_pt
args:
num_few_shot: 0
metrics:
- type: lc_winrate
value: 8.80
name: length controlled winrate
source:
url: https://github.com/tatsu-lab/alpaca_eval
name: AlpacaEval
base_model:
- TucanoBR/Tucano-1b1
---
# Tucano-1b1-Instruct
<img src="./logo.png" alt="An illustration of a Tucano bird showing vibrant colors like yellow, orange, blue, green, and black." height="200">
## Model Summary
Tucano-1b1-Instruct is a fine-tuned version of [Tucano-1b1](https://huggingface.co/TucanoBR/Tucano-1b1). **[Tucano](https://huggingface.co/TucanoBR)** is a series of decoder-transformers natively pretrained in Portuguese. All Tucano models were trained on **[GigaVerbo](https://huggingface.co/datasets/TucanoBR/GigaVerbo)**, a concatenation of deduplicated Portuguese text corpora amounting to 200 billion tokens.
The fine-tuning process was divided into two stages:
- Supervised fine-tuning (SFT) using the [TucanoBR/Tucano-SFT](https://huggingface.co/datasets/TucanoBR/Tucano-SFT), a concatenation of three different instruction tuning datasets ([`cnmoro/GPT4-500k-Augmented-PTBR-Clean`](https://huggingface.co/datasets/cnmoro/GPT4-500k-Augmented-PTBR-Clean), [`rhaymison/orca-math-portuguese-64k`](https://huggingface.co/datasets/rhaymison/orca-math-portuguese-64k), [`nicholasKluge/instruct-aira-dataset-v3`](https://huggingface.co/datasets/nicholasKluge/instruct-aira-dataset-v3)).
- Direct Preference Optimization (DPO) using the [nicholasKluge/reward-aira-dataset](https://huggingface.co/datasets/nicholasKluge/reward-aira-dataset).
Read our preprint [here](https://arxiv.org/abs/2411.07854).
## Details
- **Architecture:** a Transformer-based model pre-trained via causal language modeling
- **Size:** 1,100,048,384 parameters
- **Context length:** 2048 tokens
- **Dataset:**
- [cnmoro/GPT4-500k-Augmented-PTBR-Clean](https://huggingface.co/datasets/cnmoro/GPT4-500k-Augmented-PTBR-Clean)
- [rhaymison/orca-math-portuguese-64k](https://huggingface.co/datasets/rhaymison/orca-math-portuguese-64k)
- [nicholasKluge/instruct-aira-dataset-v3](https://huggingface.co/datasets/nicholasKluge/instruct-aira-dataset-v3)
- [nicholasKluge/reward-aira-dataset](https://huggingface.co/datasets/nicholasKluge/reward-aira-dataset)
- **Language:** Portuguese
- **Training time**: ~ 12 hours
- **Emissions:** 22 KgCO2 (Germany)
- **Total energy consumption:** 58 kWh
This repository has the [source code](https://github.com/Nkluge-correa/Tucano) used to train this model. The main libraries used are:
- [PyTorch](https://github.com/pytorch/pytorch)
- [Transformers](https://github.com/huggingface/transformers)
- [Datasets](https://github.com/huggingface/datasets)
- [Tokenizers](https://github.com/huggingface/tokenizers)
- [Sentencepiece](https://github.com/google/sentencepiece)
- [Accelerate](https://github.com/huggingface/accelerate)
- [FlashAttention](https://github.com/Dao-AILab/flash-attention)
- [Liger Kernel](https://github.com/linkedin/Liger-Kernel)
- [Codecarbon](https://github.com/mlco2/codecarbon)
- [TRL](https://github.com/huggingface/trl)
## Intended Uses
The primary intended use of the Tucano models is to serve as foundations for research and development involving native Portuguese language modeling. Checkpoints saved during training are designed to provide a controlled setting for performing comparative experiments, specifically regarding the effects of active pretraining on the performance of currently available benchmarks. You may also fine-tune and adapt Tucano models for deployment if your use follows the Apache 2.0 license. If you decide to use the Tucano models as a basis for your fine-tuned model, please conduct your own risk and bias assessment.
## Out-of-scope Use
- Tucano models are **not intended for deployment**. They are not an out-of-the-box product and should not be used for human-facing interactions.
- Tucano models are for **the Portuguese language only** and are unsuitable for text generation tasks in other languages.
- Tucano models have **not been fine-tuned** for downstream tasks.
## Basic usage
Using the `pipeline`:
```python
from transformers import pipeline
generator = pipeline("text-generation", model="TucanoBR/Tucano-1b1-Instruct")
completions = generator("<instruction>Qual cidade é a capital do estado do Rio Grande do Sul?</instruction>", num_return_sequences=2, max_new_tokens=100)
for comp in completions:
print(f"🤖 {comp['generated_text']}")
```
Using the `AutoTokenizer` and `AutoModelForCausalLM`:
```python
from transformers import GenerationConfig, TextGenerationPipeline, AutoTokenizer, AutoModelForCausalLM
import torch
# Specify the model and tokenizer
model_id = "TucanoBR/Tucano-1b1-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
# Specify the generation parameters as you like
generation_config = GenerationConfig(
**{
"do_sample": True,
"max_new_tokens": 2048,
"renormalize_logits": True,
"repetition_penalty": 1.2,
"temperature": 0.1,
"top_k": 50,
"top_p": 1.0,
"use_cache": True,
}
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
generator = TextGenerationPipeline(model=model, task="text-generation", tokenizer=tokenizer, device=device)
# Generate text
prompt = "<instruction>Qual cidade é a capital do estado do Rio Grande do Sul?</instruction>"
completion = generator(prompt, generation_config=generation_config)
print(completion[0]['generated_text'])
```
## Limitations
Like almost all other language models trained on large text datasets scraped from the web, the Tucano models show behavior that does not make them an out-of-the-box solution to many real-world applications, especially those requiring factual, reliable, and nontoxic text generation. Tucano models are all subject to the following:
- **Hallucinations:** Tucano models can produce content that can be mistaken as true facts, but are misleading or entirely false, i.e., hallucination.
- **Biases and Toxicity:** Tucano models inherit the social and historical stereotypes from the data used to train them. Given these biases, the model can produce toxic content, i.e., harmful, offensive, or detrimental to individuals, groups, or communities.
- **Unreliable Code:** Tucano models may produce incorrect code snippets and statements. These code generations should not be treated as suggestions or accurate solutions.
- **Language Limitations:** Tucano models are primarily designed to interact with Portuguese. Other languages might challenge its comprehension, leading to potential misinterpretations or errors in response.
- **Repetition and Verbosity:** Tucano models may get stuck on repetition loops (especially if the repetition penalty during generations is set to a meager value) or produce verbose responses unrelated to the prompt it was given.
Hence, even though our models are released with a permissive license, we urge users to perform their risk analysis on them if they intend to use them for real-world applications.
## Evaluations
To evaluate the `Instruct` versions of our models, we used [AlpacaEval](https://github.com/tatsu-lab/alpaca_eval) 2.0 with length-controlled win rates, a fast and relatively cheap evaluation method that is highly correlated with human preferences and evaluations of pairwise comparisons. To learn more about our evaluation read [our documentation](https://github.com/Nkluge-correa/Tucano/blob/main/evaluations/README.md).
| | Avg. Length | Wins | Base Wins | Total Matches | Length-Controlled Win Rate (%) | LC Std. Error |
|-------------------------|-------------|------|-----------|---------------|--------------------------------|---------------|
| Llama-3.2-3B-Instruct | 1609 | 257 | 548 | 805 | 21.06 | 0.075 |
| **Tucano-2b4-Instruct** | 1843 | 151 | 654 | 805 | 13.00 | 0.071 |
| **Tucano-1b1-Instruct** | 1667 | 124 | 681 | 805 | 8.80 | 0.083 |
| Llama-3.2-1B-Instruct | 1429 | 99 | 706 | 805 | 7.15 | 0.057 |
| TeenyTinyLlama-460m-Chat| 1333 | 28 | 777 | 805 | 2.84 | 0.059 |
| Sabiá-7b | 5011 | 1 | 804 | 805 | 0.076 | 0.0043 |
| Gervásio-7b | 5740 | 1 | 804 | 805 | 0.026 | 0.0016 |
## Cite as 🤗
```latex
@misc{correa2024tucanoadvancingneuraltext,
title={{Tucano: Advancing Neural Text Generation for Portuguese}},
author={Corr{\^e}a, Nicholas Kluge and Sen, Aniket and Falk, Sophia and Fatimah, Shiza},
year={2024},
eprint={2411.07854},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2411.07854},
}
@article{correa2025tucanoadvancingneuraltext,
title={{Tucano: Advancing Neural Text Generation for Portuguese}},
author={Corr{\^e}a, Nicholas Kluge and Sen, Aniket and Falk, Sophia and Fatimah, Shiza},
journal={Patterns},
publisher={Elsevier},
year={2025},
doi={10.1016/j.patter.2025.101325},
url={https://doi.org/10.1016/j.patter.2025.101325},
issn={2666-3899}
}
```
## Acknowledgments
We gratefully acknowledge the granted access to the [Marvin cluster](https://www.hpc.uni-bonn.de/en/systems/marvin) hosted by [University of Bonn](https://www.uni-bonn.de/en) along with the support provided by its High Performance Computing & Analytics Lab.
## License
Tucano is licensed under the Apache License, Version 2.0. For more details, see the [LICENSE](LICENSE) file.

3
alpaca-eval.csv Normal file
View File

@@ -0,0 +1,3 @@
,win_rate,standard_error,mode,avg_length,n_wins,n_wins_base,n_draws,n_total,discrete_win_rate,length_controlled_winrate,lc_standard_error
Tucano-1b1-dpo,15.419484227948896,1.160056594824009,community,1667,124,681,0,805,15.403726708074533,8.800275585416614,0.08352323105119325
Tucano-1b1-sft,7.893483774877183,0.830060323275043,community,1075,58,747,0,805,7.204968944099378,6.376247766233091,0.07082994421269635
1 win_rate standard_error mode avg_length n_wins n_wins_base n_draws n_total discrete_win_rate length_controlled_winrate lc_standard_error
2 Tucano-1b1-dpo 15.419484227948896 1.160056594824009 community 1667 124 681 0 805 15.403726708074533 8.800275585416614 0.08352323105119325
3 Tucano-1b1-sft 7.893483774877183 0.830060323275043 community 1075 58 747 0 805 7.204968944099378 6.376247766233091 0.07082994421269635

30
config.json Normal file
View File

@@ -0,0 +1,30 @@
{
"architectures": [
"LlamaForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": 1,
"eos_token_id": 2,
"head_dim": 64,
"hidden_act": "silu",
"hidden_size": 2048,
"initializer_range": 0.02,
"intermediate_size": 5632,
"max_position_embeddings": 2048,
"mlp_bias": false,
"model_type": "llama",
"num_attention_heads": 32,
"num_hidden_layers": 22,
"num_key_value_heads": 4,
"pad_token_id": 3,
"pretraining_tp": 1,
"rms_norm_eps": 1e-05,
"rope_scaling": null,
"rope_theta": 10000.0,
"tie_word_embeddings": false,
"torch_dtype": "float32",
"transformers_version": "4.44.2",
"use_cache": false,
"vocab_size": 32002
}

View File

@@ -0,0 +1,3 @@
timestamp,project_name,run_id,experiment_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue,n_nodes
2024-09-30T07:04:42,Tucano-sft,354e308a-920a-4f70-903f-1ec02bb535ef,5b0fa12a-3dd7-45bb-9766-cc326314d9f1,36612.863234638,10.13847230867952,0.0002769101188209696,112.5,2131.3615768643012,365.62500000000006,1.144150400307572,21.761199203945356,3.7083065413816914,26.613656145634646,Germany,DEU,north rhine-westphalia,,,Linux-5.14.0-284.30.1.el9_2.x86_64-x86_64-with-glibc2.34,3.11.3,2.7.1,256,AMD EPYC 7713 64-Core Processor,8,8 x NVIDIA A40,7.0932,50.7263,975,machine,N,1.0,2
2024-11-08T17:36:35,Tucano-dpo,fa6b7e78-e5d0-45e3-9855-f2be2d9f0d71,5b0fa12a-3dd7-45bb-9766-cc326314d9f1,6364.964171009138,1.612778411754893,0.000253383737665124,112.5,748.4520727803413,188.89137554168704,0.1989048316897971,3.701681019953554,0.3329839390244344,4.233569790667786,Germany,DEU,north rhine-westphalia,,,Linux-5.14.0-284.30.1.el9_2.x86_64-x86_64-with-glibc2.34,3.11.3,2.7.2,128,AMD EPYC 7713 64-Core Processor,8,8 x NVIDIA A40,7.1178,50.7246,503.71033477783203,machine,N,1.0,1
1 timestamp project_name run_id experiment_id duration emissions emissions_rate cpu_power gpu_power ram_power cpu_energy gpu_energy ram_energy energy_consumed country_name country_iso_code region cloud_provider cloud_region os python_version codecarbon_version cpu_count cpu_model gpu_count gpu_model longitude latitude ram_total_size tracking_mode on_cloud pue n_nodes
2 2024-09-30T07:04:42 Tucano-sft 354e308a-920a-4f70-903f-1ec02bb535ef 5b0fa12a-3dd7-45bb-9766-cc326314d9f1 36612.863234638 10.13847230867952 0.0002769101188209696 112.5 2131.3615768643012 365.62500000000006 1.144150400307572 21.761199203945356 3.7083065413816914 26.613656145634646 Germany DEU north rhine-westphalia Linux-5.14.0-284.30.1.el9_2.x86_64-x86_64-with-glibc2.34 3.11.3 2.7.1 256 AMD EPYC 7713 64-Core Processor 8 8 x NVIDIA A40 7.0932 50.7263 975 machine N 1.0 2
3 2024-11-08T17:36:35 Tucano-dpo fa6b7e78-e5d0-45e3-9855-f2be2d9f0d71 5b0fa12a-3dd7-45bb-9766-cc326314d9f1 6364.964171009138 1.612778411754893 0.000253383737665124 112.5 748.4520727803413 188.89137554168704 0.1989048316897971 3.701681019953554 0.3329839390244344 4.233569790667786 Germany DEU north rhine-westphalia Linux-5.14.0-284.30.1.el9_2.x86_64-x86_64-with-glibc2.34 3.11.3 2.7.2 128 AMD EPYC 7713 64-Core Processor 8 8 x NVIDIA A40 7.1178 50.7246 503.71033477783203 machine N 1.0 1

15
evals-dpo.yaml Normal file
View File

@@ -0,0 +1,15 @@
arc_pt: 30.77
assin2_rte: 67.78
assin2_sts: 8.88
bluex: 26.01
calame_pt: 56.55
enem: 21.06
faquad_nli: 43.97
hatebr: 31.28
hatespeech_pt: 41.23
hellaswag_pt: 43.50
lambada_pt: 35.53
oab_exams: 26.47
step: 6300
truthfulqa: 41.14
tweet_br: 22.03

15
evals-sft.yaml Normal file
View File

@@ -0,0 +1,15 @@
arc_pt: 30.60
assin2_rte: 33.42
assin2_sts: 0.87
bluex: 24.61
calame_pt: 56.74
enem: 21.06
faquad_nli: 43.97
hatebr: 33.33
hatespeech_pt: 41.23
hellaswag_pt: 42.83
lambada_pt: 34.66
oab_exams: 26.20
step: 21000
truthfulqa: 41.17
tweet_br: 40.65

3
flax_model.msgpack Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2cf753c8d2965e56535ce6b0f94b2002ac2426c047f714adf4b427ab864f42c3
size 4400235635

14
generation_config.json Normal file
View File

@@ -0,0 +1,14 @@
{
"bos_token_id": 1,
"eos_token_id": 2,
"pad_token_id": 3,
"do_sample": true,
"max_new_tokens": 1024,
"renormalize_logits": true,
"repetition_penalty": 1.2,
"temperature": 0.1,
"top_k": 50,
"top_p": 1.0,
"use_cache": true,
"transformers_version": "4.42.3"
}

BIN
logo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 166 KiB

3
model.safetensors Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0ba9d245402d91a84cac0674a6c0ea47b2ba8cc680044af6ba680c6b97dabacc
size 4400249304

3
pytorch_model.bin Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c5b5b400bec694f6f943d2f93937cdb073d04b28ac62beb5c6aae7d34de359e0
size 4400291518

View File

@@ -0,0 +1,37 @@
{
"results": {
"arc_pt": {
"acc": 0.24444444444444444,
"acc_stderr": 0.012569442967524477,
"acc_norm": 0.3076923076923077,
"acc_norm_stderr": 0.01349897032094141
},
"hellaswag_pt": {
"acc": 0.3567017011593889,
"acc_stderr": 0.004986605352113748,
"acc_norm": 0.43504171632896305,
"acc_norm_stderr": 0.005160833515742615
},
"truthfulqa_pt": {
"mc1": 0.22715736040609136,
"mc1_stderr": 0.014935562313676114,
"mc2": 0.4114457861517422,
"mc2_stderr": 0.014884914480065946
}
},
"versions": {
"arc_pt": 0,
"hellaswag_pt": 1,
"truthfulqa_pt": 1
},
"config": {
"model": "hf-auto",
"model_args": "pretrained=/lustre/mlnvme/data/asen_hpc-mula/Tucano-1b1-DPO",
"batch_size": 1,
"device": "cuda:0",
"no_cache": false,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}

View File

@@ -0,0 +1,37 @@
{
"results": {
"arc_pt": {
"acc": 0.24444444444444444,
"acc_stderr": 0.012569442967524465,
"acc_norm": 0.305982905982906,
"acc_norm_stderr": 0.01347802974882896
},
"hellaswag_pt": {
"acc": 0.35572651424856433,
"acc_stderr": 0.004983557286529839,
"acc_norm": 0.42832376205439376,
"acc_norm_stderr": 0.005151187541296219
},
"truthfulqa_pt": {
"mc1": 0.23857868020304568,
"mc1_stderr": 0.015192910034567013,
"mc2": 0.411729137477953,
"mc2_stderr": 0.014880046850377518
}
},
"versions": {
"arc_pt": 0,
"hellaswag_pt": 1,
"truthfulqa_pt": 1
},
"config": {
"model": "hf-auto",
"model_args": "pretrained=/lustre/mlnvme/data/asen_hpc-mula/checkpoints-llama/slurm_job_17066349/step_21084",
"batch_size": 1,
"device": "cuda:0",
"no_cache": false,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}

1303
results-pt-dpo.json Normal file

File diff suppressed because it is too large Load Diff

1303
results-pt-sft.json Normal file

File diff suppressed because it is too large Load Diff

34
special_tokens_map.json Normal file
View File

@@ -0,0 +1,34 @@
{
"additional_special_tokens": [
"<instruction>",
"</instruction>"
],
"bos_token": {
"content": "<s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"eos_token": {
"content": "</s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"pad_token": {
"content": "<pad>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"unk_token": {
"content": "<unk>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
}
}

63138
tokenizer.json Normal file

File diff suppressed because it is too large Load Diff

75
tokenizer_config.json Normal file
View File

@@ -0,0 +1,75 @@
{
"add_bos_token": false,
"add_eos_token": false,
"add_prefix_space": null,
"added_tokens_decoder": {
"0": {
"content": "<unk>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"1": {
"content": "<s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"2": {
"content": "</s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"3": {
"content": "<pad>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"32000": {
"content": "<instruction>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": true
},
"32001": {
"content": "</instruction>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": true
}
},
"additional_special_tokens": [
"<instruction>",
"</instruction>"
],
"bos_token": "<s>",
"bos_token_id": 1,
"chat_template": "{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '<instruction>' + message['content'].strip() + '</instruction>'}}{% elif message['role'] == 'assistant' %}{{ message['content'].strip() + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
"clean_up_tokenization_spaces": false,
"eos_token": "</s>",
"eos_token_id": 2,
"legacy": false,
"model_max_length": 2048,
"pad_token": "<pad>",
"pad_token_id": 3,
"padding_side": "right",
"sp_model_kwargs": {},
"tokenizer_class": "LlamaTokenizer",
"unk_token": "<unk>",
"unk_token_id": 0,
"use_default_system_prompt": false
}

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:25e6bb1adaa63f6ab75a8d7afb6291ecb258c33396a1fab82f4f8b50609aaf6f
size 6768

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7c0a735bda022b466ea4e54a901a1aafc1f1d159d2d12feadce808395d75b242
size 655585

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eec0042a6c67716e4e8864db28ad69f72451173c6cf4f2720fb1b718601bf1c9
size 3755

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9519fbee21fef0dd196f26dc10c50c0842fa3b13ba85df6979f4d29ccc4c4f1a
size 3053