Initialize the project; model provided by the ModelHub XC community

Model: reasonir/ReasonIR-8B
Source: Original Platform
ModelHub XC
2026-05-14 17:25:26 +08:00
commit 656b1048bd
16 changed files with 4426 additions and 0 deletions

36
.gitattributes vendored Normal file

@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text
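
Every pattern above routes matching files through the Git LFS filter, so the repository stores lightweight pointer files in place of multi-gigabyte blobs (the `tokenizer.json` entry explains the pointer shown for it further down). A minimal sketch of how such patterns select files, using Python's `fnmatch` on an abridged, illustrative subset of the list:

```python
from fnmatch import fnmatch

# Abridged subset of the LFS patterns from .gitattributes above.
LFS_PATTERNS = ["*.safetensors", "*.bin", "*.onnx", "tokenizer.json", "*tfevents*"]

def is_lfs_tracked(path: str) -> bool:
    """Return True if the file's basename matches any LFS pattern."""
    name = path.rsplit("/", 1)[-1]
    return any(fnmatch(name, pat) for pat in LFS_PATTERNS)

assert is_lfs_tracked("model-00001-of-00004.safetensors")
assert not is_lfs_tracked("config.json")
```

Real gitattributes matching has extra rules (e.g. the `saved_model/**/*` glob); this only illustrates the basic pattern-to-filter mapping.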

10
1_Pooling/config.json Normal file

@@ -0,0 +1,10 @@
{
"word_embedding_dimension": 4096,
"pooling_mode_cls_token": false,
"pooling_mode_mean_tokens": true,
"pooling_mode_max_tokens": false,
"pooling_mode_mean_sqrt_len_tokens": false,
"pooling_mode_weightedmean_tokens": false,
"pooling_mode_lasttoken": false,
"include_prompt": false
}
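
With only `pooling_mode_mean_tokens` enabled, sentence embeddings are the attention-mask-weighted mean of the 4096-dimensional token states, and `include_prompt: false` additionally excludes instruction tokens from that average. A minimal sketch of masked mean pooling, assuming the Transformer stage has already produced token states (the real implementation is sentence-transformers' `Pooling` module):

```python
import torch

def mean_pool(token_embs: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Masked mean over the sequence dimension.

    token_embs:     (batch, seq_len, 4096) last hidden states
    attention_mask: (batch, seq_len), 1 for tokens to pool, 0 for padding;
                    with include_prompt=false the instruction tokens are
                    also zeroed here before pooling.
    """
    mask = attention_mask.unsqueeze(-1).to(token_embs.dtype)  # (B, S, 1)
    summed = (token_embs * mask).sum(dim=1)                   # (B, 4096)
    counts = mask.sum(dim=1).clamp(min=1e-9)                  # avoid division by zero
    return summed / counts

print(mean_pool(torch.randn(2, 8, 4096), torch.ones(2, 8)).shape)  # torch.Size([2, 4096])
```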

88
README.md Normal file

@@ -0,0 +1,88 @@
---
base_model:
- meta-llama/Llama-3.1-8B
language:
- en
license: cc-by-nc-4.0
pipeline_tag: feature-extraction
library_name: transformers
tags:
- sentence-transformers
---
## Model Summary
ReasonIR-8B is the first retriever specifically trained for general reasoning tasks, achieving state-of-the-art retrieval performance on BRIGHT, a reasoning-intensive retrieval benchmark.
When used for retrieval-augmented generation (RAG), ReasonIR-8B also brings substantial gains on MMLU and GPQA.
- Paper: https://arxiv.org/abs/2504.20595
- Repository: https://github.com/facebookresearch/ReasonIR
- Data: https://huggingface.co/datasets/reasonir/reasonir-data
## Usage
Make sure to install `transformers>=4.47.0` first!
### Transformers
```python
from transformers import AutoModel
model = AutoModel.from_pretrained("reasonir/ReasonIR-8B", torch_dtype="auto", trust_remote_code=True)
model = model.to("cuda")
model.eval()
query = "The quick brown fox jumps over the lazy dog."
document = "The quick brown fox jumps over the lazy dog."
query_instruction = ""
doc_instruction = ""
query_emb = model.encode(query, instruction=query_instruction)
doc_emb = model.encode(document, instruction=doc_instruction)
sim = query_emb @ doc_emb.T
```
When using `AutoModel`, it is important to:
1. Include `trust_remote_code=True` to make sure our custom bidirectional encoding architecture is used.
2. Use `torch_dtype="auto"` so that `bf16` is activated (by default torch will use `fp32`). A quick sanity check for both points is sketched below.
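
A way to confirm both points after loading (a sketch; `model.dtype` is the standard transformers attribute, and `ReasonIRModel` is the class named in `config.json`'s `auto_map`):

```python
import torch
from transformers import AutoModel

model = AutoModel.from_pretrained(
    "reasonir/ReasonIR-8B", torch_dtype="auto", trust_remote_code=True
)

# torch_dtype="auto" keeps the checkpoint's bfloat16 weights; omitting it
# would silently upcast everything to float32.
assert model.dtype == torch.bfloat16, f"unexpected dtype: {model.dtype}"

# trust_remote_code=True loads the custom class from modeling_reasonir_8b.py.
print(type(model).__name__)  # expected: ReasonIRModel
```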
### Sentence Transformers
In addition to Transformers, you can use this model with Sentence Transformers:
```python
# pip install sentence-transformers
from sentence_transformers import SentenceTransformer
model_kwargs = {"torch_dtype": "auto"}
model = SentenceTransformer("reasonir/ReasonIR-8B", trust_remote_code=True, model_kwargs=model_kwargs)
query = "The quick brown fox jumps over the lazy dog."
document = "The quick brown fox jumps over the lazy dog."
query_instruction = ""
doc_instruction = ""
query_emb = model.encode(query, prompt=query_instruction)
doc_emb = model.encode(document, prompt=doc_instruction)
sim = model.similarity(query_emb, doc_emb)
```
It is important to also include `trust_remote_code=True` and `torch_dtype="auto"` as discussed earlier.
> [!NOTE]
> There are some very slight floating point discrepancies when using the model via SentenceTransformer, caused by how the models are cast to the `bfloat16` dtype; these should not affect results in general.
We thank [@tomaarsen](https://huggingface.co/tomaarsen) for improving the SentenceTransformer integration and analyzing the cause of the floating point discrepancies!
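
To quantify the discrepancy yourself, a sketch comparing the two code paths on the same text (illustrative; it assumes both `encode` calls return array-likes, and the exact magnitude depends on hardware and library versions):

```python
import torch
from transformers import AutoModel
from sentence_transformers import SentenceTransformer

text = "The quick brown fox jumps over the lazy dog."

hf = AutoModel.from_pretrained(
    "reasonir/ReasonIR-8B", torch_dtype="auto", trust_remote_code=True
).eval()
st = SentenceTransformer(
    "reasonir/ReasonIR-8B", trust_remote_code=True,
    model_kwargs={"torch_dtype": "auto"},
)

emb_hf = torch.as_tensor(hf.encode(text, instruction=""))
emb_st = torch.as_tensor(st.encode(text, prompt=""))

# Expect a tiny maximum absolute difference from bfloat16 casting order,
# not a meaningful change in retrieval rankings.
print((emb_hf - emb_st).abs().max())
```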
## Citation
```
@article{shao2025reasonir,
title={ReasonIR: Training Retrievers for Reasoning Tasks},
author={Rulin Shao and Rui Qiao and Varsha Kishore and Niklas Muennighoff and Xi Victoria Lin and Daniela Rus and Bryan Kian Hsiang Low and Sewon Min and Wen-tau Yih and Pang Wei Koh and Luke Zettlemoyer},
year={2025},
journal={arXiv preprint arXiv:2504.20595},
url={https://arxiv.org/abs/2504.20595},
}
```

39
config.json Normal file

@@ -0,0 +1,39 @@
{
"_name_or_path": "reasonir/ReasonIR-8B",
"architectures": [
"ReasonIRModel"
],
"auto_map": {
"AutoModel": "modeling_reasonir_8b.ReasonIRModel"
},
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": 128000,
"eos_token_id": 128001,
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 4096,
"initializer_range": 0.02,
"intermediate_size": 14336,
"max_position_embeddings": 131072,
"mlp_bias": false,
"model_type": "llama",
"num_attention_heads": 32,
"num_hidden_layers": 32,
"num_key_value_heads": 8,
"pretraining_tp": 1,
"rms_norm_eps": 1e-05,
"rope_scaling": {
"factor": 8.0,
"high_freq_factor": 4.0,
"low_freq_factor": 1.0,
"original_max_position_embeddings": 8192,
"rope_type": "llama3"
},
"rope_theta": 500000.0,
"tie_word_embeddings": false,
"torch_dtype": "bfloat16",
"transformers_version": "4.47.0.dev0",
"use_cache": true,
"vocab_size": 128256
}
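
Several fields above are linked: `head_dim` is `hidden_size / num_attention_heads` (4096 / 32 = 128), and `num_key_value_heads: 8` means grouped-query attention with four query heads sharing each key/value head. A small check of the derived values, pure arithmetic over the config:

```python
hidden_size, n_heads, n_kv_heads = 4096, 32, 8  # from config.json above

head_dim = hidden_size // n_heads
assert head_dim == 128                 # matches "head_dim"
assert n_heads // n_kv_heads == 4      # GQA: 4 query heads per KV head

# Weight count of one layer's attention projections (attention_bias=false):
q = hidden_size * n_heads * head_dim           # q_proj: 4096 x 4096
kv = 2 * hidden_size * n_kv_heads * head_dim   # k_proj + v_proj: 4096 x 1024 each
o = n_heads * head_dim * hidden_size           # o_proj: 4096 x 4096
print(q + kv + o)  # 41943040 attention weights per layer
```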

10
config_sentence_transformers.json Normal file

@@ -0,0 +1,10 @@
{
"__version__": {
"sentence_transformers": "4.0.2",
"transformers": "4.48.2",
"pytorch": "2.6.0+cu124"
},
"prompts": {},
"default_prompt_name": null,
"similarity_fn_name": "cosine"
}

3
model-00001-of-00004.safetensors Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:88423f543709672c3fb67b1f1e0a2bd9ad974668620ebb79c03748a51e37941c
size 4976698176
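
Those three lines are the entire on-disk content of a Git LFS pointer: the spec version, the SHA-256 of the real blob, and its size in bytes. A sketch (helper names are ours) that parses a pointer and, once the blob has been fetched with `git lfs pull`, verifies it:

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    """Parse a git-lfs pointer file into {'version', 'oid', 'size'}."""
    fields = dict(line.split(" ", 1) for line in Path(path).read_text().splitlines())
    return {
        "version": fields["version"],
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

def verify_blob(blob_path: str, pointer: dict, chunk: int = 1 << 20) -> bool:
    """Stream-hash the downloaded blob and check it against the pointer."""
    h, size = hashlib.sha256(), 0
    with open(blob_path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
            size += len(block)
    return size == pointer["size"] and h.hexdigest() == pointer["oid"]
```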

3
model-00002-of-00004.safetensors Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2df5a07920cb43273f20f3944eeb0d1dcfae94b57e124f8a88715563895b25df
size 4999802096

3
model-00003-of-00004.safetensors Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a5c538338fe73649a6b2d2e1872b271513a2f2f307b3d88067cac1ef0aa692a6
size 4915915576

3
model-00004-of-00004.safetensors Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fb23fe24cd4ac97ceb0627051f990bd935ee79b0c5fd183b852840d3d55af731
size 117465520

297
model.safetensors.index.json Normal file

@@ -0,0 +1,297 @@
{
"metadata": {
"total_size": 15009849344
},
"weight_map": {
"embed_tokens.weight": "model-00001-of-00004.safetensors",
"layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
"layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
"layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
"layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"norm.weight": "model-00004-of-00004.safetensors"
}
}
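
The `weight_map` tells a loader which of the four shards holds each tensor, and `metadata.total_size` (15,009,849,344 bytes, about 15 GB) is the combined tensor payload. A sketch of resolving and loading a single tensor without touching the other shards, using the `safetensors` package:

```python
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "layers.31.mlp.down_proj.weight"
shard = index["weight_map"][name]  # -> "model-00004-of-00004.safetensors"

# safe_open memory-maps the shard and reads only the requested tensor.
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)
print(shard, tuple(tensor.shape))  # (4096, 14336) given the config's sizes
```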

1826
modeling_reasonir_8b.py Normal file

File diff suppressed because it is too large.

20
modules.json Normal file

@@ -0,0 +1,20 @@
[
{
"idx": 0,
"name": "0",
"path": "",
"type": "sentence_transformers.models.Transformer"
},
{
"idx": 1,
"name": "1",
"path": "1_Pooling",
"type": "sentence_transformers.models.Pooling"
},
{
"idx": 2,
"name": "2",
"path": "2_Normalize",
"type": "sentence_transformers.models.Normalize"
}
]
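
This file defines the SentenceTransformer pipeline: the custom Transformer backbone, the mean `Pooling` head configured in `1_Pooling/config.json`, and a final L2 `Normalize` step (which is why dot product and cosine similarity coincide for these embeddings). A sketch of the equivalent manual module stack; in practice `SentenceTransformer("reasonir/ReasonIR-8B", trust_remote_code=True)` builds this for you, and the `*_args` plumbing here is our assumption about how to thread `trust_remote_code` through:

```python
from sentence_transformers import SentenceTransformer, models

word = models.Transformer(
    "reasonir/ReasonIR-8B",
    model_args={"trust_remote_code": True, "torch_dtype": "auto"},
    config_args={"trust_remote_code": True},
    tokenizer_args={"trust_remote_code": True},
)
pool = models.Pooling(
    word.get_word_embedding_dimension(),  # 4096
    pooling_mode="mean",
    include_prompt=False,
)
norm = models.Normalize()
model = SentenceTransformer(modules=[word, pool, norm])  # mirrors modules.json
```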

4
sentence_bert_config.json Normal file

@@ -0,0 +1,4 @@
{
"max_seq_length": 131072,
"do_lower_case": false
}

17
special_tokens_map.json Normal file

@@ -0,0 +1,17 @@
{
"bos_token": {
"content": "<|begin_of_text|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"eos_token": {
"content": "<|end_of_text|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"pad_token": "<|end_of_text|>"
}
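
Llama-3.1 ships no dedicated padding token, so this map reuses `<|end_of_text|>` as `pad_token`; the attention mask, not the pad id, is what keeps padding out of the mean pooling. A sketch of batched tokenization under this setup:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("reasonir/ReasonIR-8B")
print(tok.pad_token)                          # <|end_of_text|>
print(tok.pad_token_id == tok.eos_token_id)   # True: pad reuses the EOS id

batch = tok(["short", "a somewhat longer sentence"], padding=True, return_tensors="pt")
# Padded positions carry pad_token_id, but attention_mask is 0 there,
# so they contribute nothing to masked mean pooling.
print(batch["attention_mask"])
```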

3
tokenizer.json Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b
size 17209920

2064
tokenizer_config.json Normal file

File diff suppressed because it is too large.