初始化项目,由ModelHub XC社区提供模型

Model: gia-uh/cecilia-2b-instruct-v1
Source: Original Platform
This commit is contained in:
ModelHub XC
2026-05-05 08:30:49 +08:00
commit e1305a41d5
10 changed files with 226 additions and 0 deletions

39
.gitattributes vendored Normal file
View File

@@ -0,0 +1,39 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
cecilia_ft_ms_v1.gguf filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text
cecilia-instruct-v1.f16.gguf filter=lfs diff=lfs merge=lfs -text
cecilia-2b-instruct-v1.f16.gguf filter=lfs diff=lfs merge=lfs -text

47
README.md Normal file
View File

@@ -0,0 +1,47 @@
---
library_name: transformers
tags:
- llama
- gguf
- safetensors
- finetune
base_model:
- gia-uh/cecilia-2b-v0.1
license: mit
datasets:
- gia-uh/maria-silvia-v1
language:
- es
- en
pipeline_tag: text-generation
---
# Cecilia: The Cuban Language Model
Cecilia is a family of language models continually pretrained specifically on Cuban written text, capturing the linguistic, cultural, and social nuances of Cuban Spanish. These models are designed to support natural language processing tasks with a focus on Cuban language varieties and cultural context.
## About Cecilia 2B Instruct v1
This model is a fine-tuned version of **Cecilia 2B v0.1**, which is a continually pre-trained model based on [Salamandra 2b](https://huggingface.co/BSC-LT/salamandra-2b). It belongs to the **Cecilia** collection and follows the same lineage as [Cecilia 2B v0.1](https://huggingface.co/gia-uh/cecilia-2b-v0.1).
## Model Formats
This repository is a **Hybrid Release** containing:
- **Safetensors:** For use with Hugging Face `transformers`.
- **GGUF (FP16):** For use with `llama.cpp`, `vLLM`, or local inference tools.
## Quantizations
Official quantized GGUF versions (Q8_0, Q6_K, Q4_K_M) are available in the repository [gia-uh/cecilia-2b-instruct-v1-GGUF](https://huggingface.co/gia-uh/cecilia-2b-instruct-v1-GGUF)
## Quickstart (Transformers)
```python
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
repo_id = "gia-uh/cecilia-2b-instruct-v1"
# Load model and tokenizer
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=False)
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=False)
# Simple inference
inputs = tokenizer("Hola, ¿qué bolá?", return_tensors="pt")
outputs = model(**inputs)

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:57af0d98047e0c4efe9b647a8a3ba812bf36e6e1dd64a6275875f4e52c0efe8c
size 4513629344

30
config.json Normal file
View File

@@ -0,0 +1,30 @@
{
"architectures": [
"LlamaForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": 1,
"eos_token_id": 2,
"pad_token_id": 2,
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 2048,
"initializer_range": 0.02,
"intermediate_size": 5440,
"max_position_embeddings": 8192,
"mlp_bias": false,
"model_type": "llama",
"num_attention_heads": 16,
"num_hidden_layers": 24,
"num_key_value_heads": 16,
"pretraining_tp": 1,
"rms_norm_eps": 1e-05,
"rope_scaling": null,
"rope_theta": 10000.0,
"tie_word_embeddings": false,
"torch_dtype": "bfloat16",
"transformers_version": "4.51.3",
"use_cache": false,
"vocab_size": 256000
}

6
generation_config.json Normal file
View File

@@ -0,0 +1,6 @@
{
"_from_model_config": true,
"bos_token_id": 1,
"eos_token_id": 2,
"transformers_version": "4.51.3"
}

3
model.safetensors Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8294b7435412e8acf7c22987163cec1f7e241eb6d732eda605c72afae14768b8
size 4507005744

28
special_tokens_map.json Normal file
View File

@@ -0,0 +1,28 @@
{
"additional_special_tokens": [
"<|im_start|>",
"<|im_end|>"
],
"bos_token": {
"content": "<s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"eos_token": {
"content": "</s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"pad_token": "</s>",
"unk_token": {
"content": "<unk>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
}
}

3
tokenizer.json Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:990527d1e7b98c027d386c742250b2f8517bd3adf98c46cc6c1c2f35b234c224
size 37007559

3
tokenizer.model Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ab94ddf46d14f0279254858d53770c5319c5129d47291ee2bada530271cb1292
size 4813276

64
tokenizer_config.json Normal file
View File

@@ -0,0 +1,64 @@
{
"add_bos_token": true,
"add_eos_token": false,
"add_prefix_space": true,
"added_tokens_decoder": {
"0": {
"content": "<unk>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"1": {
"content": "<s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"2": {
"content": "</s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"4": {
"content": "<|im_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"5": {
"content": "<|im_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
}
},
"additional_special_tokens": [
"<|im_start|>",
"<|im_end|>"
],
"bos_token": "<s>",
"chat_template": "{%- if not date_string is defined %}{%- set date_string = \"2025-11-13\" %}{%- endif %}{%- set base_system_message = \"Soy CecilIA, un modelo de lenguaje experimental desarrollado en colaboración entre la Facultad de Matemática y Computación (MATCOM) de la Universidad de La Habana y el Grupo de Procesamiento del Lenguaje y Sistemas de Información (GPLSI) de la Universidad de Alicante, entrenado para entender y procesar el español hablado en Cuba. Estás diseñado para responder a preguntas de cultura, historia, geografía, y conversar de forma general sobre Cuba. Responde siempre de forma amigable y coloquial.\" -%}{%- if messages and messages[0].role == \"system\" -%}{%- set task_system = messages[0].content -%}{%- set messages = messages[1:] -%}{%- else -%}{%- set task_system = \"\" -%}{%- endif -%}{%- if task_system -%}{%- set system_message = base_system_message + \"\n\" + task_system -%}{%- else -%}{%- set system_message = base_system_message -%}{%- endif -%}{{ \"<|im_start|>system\n\" + system_message + \"<|im_end|>\n\" }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"clean_up_tokenization_spaces": false,
"eos_token": "</s>",
"legacy": true,
"model_max_length": 8192,
"pad_token": "<unk>",
"padding_side": "right",
"sp_model_kwargs": {},
"spaces_between_special_tokens": false,
"tokenizer_class": "LlamaTokenizer",
"unk_token": "<unk>",
"use_default_system_prompt": false
}