初始化项目,由ModelHub XC社区提供模型

Model: Joaoffg/ELM
Source: Original Platform
This commit is contained in:
ModelHub XC
2026-05-13 10:08:34 +08:00
commit 4ce2bbdbca
8 changed files with 184 additions and 0 deletions

35
.gitattributes vendored Normal file
View File

@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text

43
README.md Normal file
View File

@@ -0,0 +1,43 @@
---
license: llama2
language:
- en
- nl
tags:
- academic
- university
---
# Model Card for the Erasmian Language Model
ELM is a community-driven large language model tailored to the research and education needs of Erasmus University (EUR, Netherlands) students and staff.
The model draws inspiration from ChatGPT and Llama in terms of architecture, but it aims to be privacy sensitive, environmentally conscious, and from and for the Erasmus community. Here are a few key points of ELM:
We hope that the ELM experience becomes a template for community-driven, decentralized, and purposeful AI development and application.
## Model Details
### Model Description
The underlying language model is trained and fine-tuned on academic outputs from Erasmus University, such as scientific papers or student theses;
Training and fine-tuning the model is a joint effort of students and staff, transparent for all parties involved;
The prompt-response examples used to fine-tune the model come from students and staff, not crowdsourcing services;
Defining what is the "better" model output also comes from the perspective of research and education.
The true richness of ELM lies in the way its training data is generated. What is the "state-of-the-art" model may change quickly, but quality data will maintain its relevance and ensure that ELM and its future iterations serve the needs of the community that nurtured it.
- **Developed by:** João Gonçalves, Nick Jelicic
- **Funded by [optional]:** Convergence AI and Digitalization, Erasmus Trustfonds
- **Model type:** Llama-2 Instruct
- **Language(s) (NLP):** English, Dutch
- **License:** Llama2
### Model Sources
<!-- Provide the basic links for the model. -->
- **Repository:** https://github.com/Joaoffg/ELM
- **Paper:** https://arxiv.org/abs/2408.06931
- **Demo:** https://huggingface.co/spaces/Joaoffg/Joaoffg-ELM

29
config.json Normal file
View File

@@ -0,0 +1,29 @@
{
"_name_or_path": "/data/storage_elm_medium/CHAT_ELM_7M_RLHF_Merged",
"architectures": [
"LlamaForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": 1,
"eos_token_id": 2,
"hidden_act": "silu",
"hidden_size": 2048,
"initializer_range": 0.02,
"intermediate_size": 5120,
"max_position_embeddings": 2048,
"model_type": "llama",
"num_attention_heads": 32,
"num_hidden_layers": 16,
"num_key_value_heads": 32,
"pad_token_id": 0,
"pretraining_tp": 1,
"rms_norm_eps": 1e-12,
"rope_scaling": null,
"rope_theta": 10000.0,
"tie_word_embeddings": false,
"torch_dtype": "float32",
"transformers_version": "4.40.2",
"use_cache": false,
"vocab_size": 32000
}

6
generation_config.json Normal file
View File

@@ -0,0 +1,6 @@
{
"_from_model_config": true,
"bos_token_id": 1,
"eos_token_id": 2,
"transformers_version": "4.40.2"
}

3
model.safetensors Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e98d729a26e589f9c2ce43cfaf1c53e8f7b78e0b6b85f784bddb20136308e9f1
size 3611582888

23
special_tokens_map.json Normal file
View File

@@ -0,0 +1,23 @@
{
"bos_token": {
"content": "<s>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
},
"eos_token": {
"content": "</s>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
},
"unk_token": {
"content": "<unk>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
}
}

3
tokenizer.model Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3801a11a627988531d236ce6dc33cf32c7970a232d1a18bc2aa4f6468fefcd54
size 7170399

42
tokenizer_config.json Normal file
View File

@@ -0,0 +1,42 @@
{
"add_bos_token": true,
"add_eos_token": false,
"add_prefix_space": true,
"added_tokens_decoder": {
"0": {
"content": "<unk>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": true
},
"1": {
"content": "<s>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": true
},
"2": {
"content": "</s>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": true
}
},
"bos_token": "<s>",
"clean_up_tokenization_spaces": false,
"eos_token": "</s>",
"legacy": true,
"model_max_length": 1000000000000000019884624838656,
"pad_token": null,
"sp_model_kwargs": {},
"spaces_between_special_tokens": false,
"tokenizer_class": "LlamaTokenizer",
"unk_token": "<unk>",
"use_default_system_prompt": true
}