初始化项目,由ModelHub XC社区提供模型
Model: microsoft/UserLM-8b Source: Original Platform
This commit is contained in:
56
.gitattributes
vendored
Normal file
56
.gitattributes
vendored
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
*.7z filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.arrow filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.bin filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.bin.* filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.ftz filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.gz filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.h5 filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.joblib filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.model filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.onnx filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.ot filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.parquet filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.pb filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.pt filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.pth filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.rar filter=lfs diff=lfs merge=lfs -text
|
||||||
|
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.tflite filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.tgz filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.xz filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.zip filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.zstandard filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.tfevents* filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.db* filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.ark* filter=lfs diff=lfs merge=lfs -text
|
||||||
|
**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
|
||||||
|
**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
|
||||||
|
**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
|
||||||
|
|
||||||
|
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.gguf* filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.ggml filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.llamafile* filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.pt2 filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.npy filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.npz filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.pickle filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.pkl filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.tar filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.wasm filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.zst filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
||||||
|
|
||||||
|
tokenizer.json filter=lfs diff=lfs merge=lfs -text
|
||||||
|
model-00006-of-00007.safetensors filter=lfs diff=lfs merge=lfs -text
|
||||||
|
model-00001-of-00007.safetensors filter=lfs diff=lfs merge=lfs -text
|
||||||
|
model-00005-of-00007.safetensors filter=lfs diff=lfs merge=lfs -text
|
||||||
|
model-00003-of-00007.safetensors filter=lfs diff=lfs merge=lfs -text
|
||||||
|
model-00002-of-00007.safetensors filter=lfs diff=lfs merge=lfs -text
|
||||||
|
model-00007-of-00007.safetensors filter=lfs diff=lfs merge=lfs -text
|
||||||
|
model-00004-of-00007.safetensors filter=lfs diff=lfs merge=lfs -text
|
||||||
154
README.md
Normal file
154
README.md
Normal file
@@ -0,0 +1,154 @@
|
|||||||
|
---
|
||||||
|
license: mit
|
||||||
|
datasets:
|
||||||
|
- allenai/WildChat-1M
|
||||||
|
language:
|
||||||
|
- en
|
||||||
|
base_model:
|
||||||
|
- meta-llama/Llama-3.1-8B
|
||||||
|
pipeline_tag: text-generation
|
||||||
|
tags:
|
||||||
|
- userlm
|
||||||
|
- simulation
|
||||||
|
---
|
||||||
|
|
||||||
|
# microsoft/UserLM-8b model card
|
||||||
|
|
||||||
|
## Model description
|
||||||
|
|
||||||
|
Unlike typical LLMs that are trained to play the role of the "assistant" in conversation, we trained UserLM-8b to simulate the “user” role in conversation (by training it to predict user turns in a large corpus of conversations called WildChat). This model is useful in simulating more realistic conversations, which is in turn useful in the development of more robust assistants.
|
||||||
|
|
||||||
|
The model takes a single input, which is the “task intent”, which defines the high-level objective that the user simulator should pursue. The model can then be used to generate: (1) a first-turn user utterance, (2) generate follow-up user utterances based on a conversation state (one or several user-assistant turn exchanges), and (3) generate a <|endconversation|> token when the user simulator expects that the conversation has run its course.
|
||||||
|
|
||||||
|
Developed by: Tarek Naous (intern at MSR Summer 2025), Philippe Laban (MSR), Wei Xu, Jennifer Neville (MSR)
|
||||||
|
|
||||||
|
Paper: https://arxiv.org/abs/2510.06552
|
||||||
|
|
||||||
|
# How to get started with the model
|
||||||
|
|
||||||
|
Here’s a simple snippet to use the model:
|
||||||
|
```python
|
||||||
|
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||||
|
import torch
|
||||||
|
|
||||||
|
# Load the model and tokenizer
|
||||||
|
model_path = "microsoft/UserLM-8b"
|
||||||
|
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
|
||||||
|
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True).to("cuda")
|
||||||
|
|
||||||
|
# Create a conversation
|
||||||
|
messages = [{"role": "system", "content": "You are a user who wants to implement a special type of sequence. The sequence sums up the two previous numbers in the sequence and adds 1 to the result. The first two numbers in the sequence are 1 and 1."}]
|
||||||
|
inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to("cuda")
|
||||||
|
|
||||||
|
end_token = "<|eot_id|>"
|
||||||
|
end_token_id = tokenizer.encode(end_token, add_special_tokens=False)
|
||||||
|
|
||||||
|
end_conv_token = "<|endconversation|>"
|
||||||
|
end_conv_token_id = tokenizer.encode(end_conv_token, add_special_tokens=False)
|
||||||
|
|
||||||
|
outputs = model.generate(
|
||||||
|
input_ids=inputs,
|
||||||
|
do_sample=True,
|
||||||
|
top_p=0.8,
|
||||||
|
temperature=1,
|
||||||
|
max_new_tokens=10,
|
||||||
|
eos_token_id=end_token_id,
|
||||||
|
pad_token_id=tokenizer.eos_token_id,
|
||||||
|
bad_words_ids=[[token_id] for token_id in end_conv_token_id]
|
||||||
|
)
|
||||||
|
response = tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
|
||||||
|
print(response)
|
||||||
|
```
|
||||||
|
|
||||||
|
# Uses
|
||||||
|
|
||||||
|
## Direct intended uses
|
||||||
|
|
||||||
|
The UserLM-8b is released for use by researchers involved in the evaluation of assistant LLMs. In such scenarios, UserLM-8b can be used to simulate multi-turn conversations, with our analyses (see Section 3 of the paper) giving evidence that UserLM-8b provides more realistic simulation of user behavior than other methods (such as prompting an assistant model). UserLM-8b offers a user simulation environment that can better estimate the performance of an assistant LLM with real users. See Section 4 of the paper for an initial implementation of such an evaluation.
|
||||||
|
|
||||||
|
## Downstream uses
|
||||||
|
|
||||||
|
We envision several potential uses for UserLM-8b that we did not implement yet in our presented work but describe in our Discussion section as potential research directions for UserLMs. These potential applications include: (1) user modeling (i.e., predicting user responses to a given set of questions), (2) foundation for judge models (i.e., LLM-as-a-judge finetuning), (3) synthetic data generation (in conjunction with an assistant LM).
|
||||||
|
|
||||||
|
## Out-of-scope uses
|
||||||
|
|
||||||
|
We caution potential users of the model that UserLM-8b is **not an assistant LM**, unlike the majority of LLMs released on HuggingFace. As such, it is unlikely to be useful to end-users that require assistance with a task, for which an assistant LLM (such as microsoft/Phi-4) might be more appropriate.
|
||||||
|
|
||||||
|
We do not recommend using UserLM in commercial or real-world applications without further testing and development. It is being released for research purposes.
|
||||||
|
|
||||||
|
# Risks and limitations
|
||||||
|
|
||||||
|
The paper accompanying this model release presents several evaluations of UserLM-8b and its potential limitations.
|
||||||
|
|
||||||
|
First in Section 3, we describe the robustness experiments we conducted with UserLM-8b, which show that though the model can more robustly adhere to the user role and the provided task intent, the robustness numbers are not perfect (< 100%), meaning that UserLM-8b can occasionally deviate from its user role or its initial task intent.
|
||||||
|
|
||||||
|
Second in Section 4, we describe the possibility for the UserLM-8b to hallucinate additional requirements that are not provided in the task intent. In such cases, we find that the UserLM can introduce new facts or constraints to the task. This can both be beneficial (diversifying simulation conditions) and detrimental (e.g., in cases where the hallucination is incompatible with the task intent). Hallucination mitigation is unfortunately an unsolved research problem, and all generative models (including UserLMs) generate hallucinated text on occasion. One mitigation option is to provide user intents that are as specified as possible, which limits the opportunities for the UserLM to hallucinate task information.
|
||||||
|
|
||||||
|
UserLM was designed and tested using the English language. Performance in other languages may vary and should be assessed by someone who is both an expert in the expected outputs and a native speaker of that language.
|
||||||
|
|
||||||
|
UserLM inherits any biases, errors, or omissions produced by its base model. Developers are advised to choose an appropriate base LLM/MLLM carefully, depending on the intended use case.
|
||||||
|
|
||||||
|
UserLM inherits any biases, errors, or omissions characteristic of its training data, which may be amplified by any AI-generated interpretations.
|
||||||
|
|
||||||
|
There has not been a systematic effort to ensure that systems using UserLM are protected from security vulnerabilities such as indirect prompt injection attacks. Any systems using it should take proactive measures to harden their systems as appropriate.
|
||||||
|
|
||||||
|
|
||||||
|
# Recommendations
|
||||||
|
|
||||||
|
The UserLM-8b is a research release, and it is likely to require some adaptation when applied to new tasks and environments. In Appendix D.1 of the paper (Generation Configuration for UserLM-8b), we describe four generation guardrails (Filtering First Tokens, Avoiding Dialogue Termination, Maximal and Minimal Length Threshold, and Filter Verbatim Repetitions) we implemented to get the UserLM-8b to effectively simulate user utterances on the use-cases described in our paper. We encourage users of UserLM-8b to adopt and adapt these guardrails in their own use-cases.
|
||||||
|
|
||||||
|
# Training details
|
||||||
|
## Training data
|
||||||
|
We trained on a filtered version of [WildChat-1M](https://huggingface.co/datasets/allenai/WildChat-1M). The details on the filtering and processing are Appendix A and Section 2 of our paper. We do not release any data or processing scripts with our paper, as we believe these are sufficiently detailed in the paper that they can be reimplemented.
|
||||||
|
|
||||||
|
## Training procedure
|
||||||
|
|
||||||
|
We performed full-parameter fine-tuning of Llama-3.1-8B (base). We used a maximum sequence length of 2048 tokens, a batch size of 1024 samples, and a learning rate of 2e-5. Training was performed on four NVIDIA RTX A6000 GPUs, taking 227 hours to train UserLM-8b. Further details are provided in Section 2.2 of our paper.
|
||||||
|
|
||||||
|
# Evaluation
|
||||||
|
|
||||||
|
## Testing data
|
||||||
|
We evaluated on a held-out set of [WildChat-1M](https://huggingface.co/datasets/allenai/WildChat-1M), as well as [PRISM](https://huggingface.co/datasets/HannahRoseKirk/prism-alignment). In our extrinsic evaluation, we evaluated using samples from the [Lost in Conversation](https://huggingface.co/datasets/microsoft/lost_in_conversation) sharded instructions.
|
||||||
|
The details for data selection are described in Section 2.2 of the paper.
|
||||||
|
|
||||||
|
## Evaluation results summary
|
||||||
|
|
||||||
|
We evaluate UserLM-8b with three complementary experiments:
|
||||||
|
1. Distributional Alignment (perplexity). We measure the ability of the UserLM-8b to predict (generate) user utterances for a set of test conversations from users that were not included in training. We observe lower perplexity (higher alignment) than prior work, including previously trained models (USP-8b) and prompted assistant models. See Section 2 of the paper for details.
|
||||||
|
2. Intrinsic Evaluation of User Simulators. We define six metrics that correspond to desirable properties of user simulators (for example, ability to end conversation, or shard information across turns). We then compare UserLM-8b to a broad set of methods including trained models, and open-weights and API-based prompted assistant models. We find that UserLM-8b outperforms assistant-based methods on all six metrics. See Section 3 of the paper for details.
|
||||||
|
3. Extrinsic Evaluation of User Simulators. We create a simulation setting for conversations involving the user wanting to either solve mathematics problems, or wanting to code a basic Python programming function. We simulate conversations with various user simulators, including UserLM-8b and prompted assistant models, and find that UserLM-8b leads to more diverse simulation on several levels (conversation pace, lexical choice, information choice), leading to a broader range of simulation, and leading to deteriorated performance from the assistant. See Section 4 of the paper for details.
|
||||||
|
|
||||||
|
|
||||||
|
# Environmental impact
|
||||||
|
Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).
|
||||||
|
- Hardware type: A6000
|
||||||
|
- Hours used: 227*4
|
||||||
|
- Cloud provider: Azure
|
||||||
|
- Compute region: useast
|
||||||
|
- Carbon emitted: 115 kg CO2 (estimate)
|
||||||
|
|
||||||
|
# BibTeX
|
||||||
|
|
||||||
|
```tex
|
||||||
|
@inproceedings{naous2025flipping,
|
||||||
|
title={Flipping the Dialogue: Training and Evaluating User Language Models},
|
||||||
|
author={Tarek Naous and Philippe Laban and Wei Xu and Jennifer Neville},
|
||||||
|
journal={arXiv preprint arXiv:2510.06552},
|
||||||
|
year={2025},
|
||||||
|
url={https://arxiv.org/abs/2510.06552}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
# Glossary
|
||||||
|
|
||||||
|
UserLM = User Language Model
|
||||||
|
|
||||||
|
# Model card contact
|
||||||
|
|
||||||
|
This research was conducted by members of Microsoft Research. We welcome feedback and collaboration from our audience. If you have suggestions, questions, or observe unexpected/offensive behavior in our technology, please contact us at: plaban@microsoft.com
|
||||||
|
|
||||||
|
If the team receives reports of undesired behavior or identifies issues independently, we will update this repository with appropriate mitigations.
|
||||||
|
|
||||||
|
# Privacy
|
||||||
|
|
||||||
|
[Privacy & Cookies](https://www.microsoft.com/en-us/privacy/privacystatement)
|
||||||
2
chat_template.jinja
Normal file
2
chat_template.jinja
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
{% for message in messages %}{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>' }}
|
||||||
|
{{ message['content'] }}<|eot_id|>{% endfor %}{{ '<|start_header_id|>user<|end_header_id|>' }}
|
||||||
29
config.json
Normal file
29
config.json
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
{
|
||||||
|
"architectures": [
|
||||||
|
"LlamaForCausalLM"
|
||||||
|
],
|
||||||
|
"attention_bias": false,
|
||||||
|
"attention_dropout": 0.0,
|
||||||
|
"bos_token_id": 128000,
|
||||||
|
"eos_token_id": 128001,
|
||||||
|
"head_dim": 128,
|
||||||
|
"hidden_act": "silu",
|
||||||
|
"hidden_size": 4096,
|
||||||
|
"initializer_range": 0.02,
|
||||||
|
"intermediate_size": 14336,
|
||||||
|
"max_position_embeddings": 8192,
|
||||||
|
"mlp_bias": false,
|
||||||
|
"model_type": "llama",
|
||||||
|
"num_attention_heads": 32,
|
||||||
|
"num_hidden_layers": 32,
|
||||||
|
"num_key_value_heads": 8,
|
||||||
|
"pretraining_tp": 1,
|
||||||
|
"rms_norm_eps": 1e-05,
|
||||||
|
"rope_scaling": null,
|
||||||
|
"rope_theta": 500000.0,
|
||||||
|
"tie_word_embeddings": false,
|
||||||
|
"torch_dtype": "float32",
|
||||||
|
"transformers_version": "4.52.4",
|
||||||
|
"use_cache": true,
|
||||||
|
"vocab_size": 128257
|
||||||
|
}
|
||||||
1
configuration.json
Normal file
1
configuration.json
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{"framework": "pytorch", "task": "text-generation", "allow_remote": true}
|
||||||
9
generation_config.json
Normal file
9
generation_config.json
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
{
|
||||||
|
"bos_token_id": 128000,
|
||||||
|
"do_sample": true,
|
||||||
|
"eos_token_id": 128009,
|
||||||
|
"max_length": 4096,
|
||||||
|
"temperature": 0.6,
|
||||||
|
"top_p": 0.9,
|
||||||
|
"transformers_version": "4.52.4"
|
||||||
|
}
|
||||||
3
model-00001-of-00007.safetensors
Normal file
3
model-00001-of-00007.safetensors
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
version https://git-lfs.github.com/spec/v1
|
||||||
|
oid sha256:1880b3180ac5705b1090dcb1a1b0534596b0e26920ee6719862f43a550061f97
|
||||||
|
size 4886482552
|
||||||
3
model-00002-of-00007.safetensors
Normal file
3
model-00002-of-00007.safetensors
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
version https://git-lfs.github.com/spec/v1
|
||||||
|
oid sha256:f20c8c59d3335893c6d47df04ec04485d6256726c2d4db9748bf0a797e134810
|
||||||
|
size 4832007448
|
||||||
3
model-00003-of-00007.safetensors
Normal file
3
model-00003-of-00007.safetensors
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
version https://git-lfs.github.com/spec/v1
|
||||||
|
oid sha256:dbb123190de3e4d9d68eacf88824b12485a5ecb6425a41ffe7e5e588ca7f239c
|
||||||
|
size 4999813112
|
||||||
3
model-00004-of-00007.safetensors
Normal file
3
model-00004-of-00007.safetensors
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
version https://git-lfs.github.com/spec/v1
|
||||||
|
oid sha256:0a48e4e365c98db5d59243e3415dee1a401bd90ef07187ad9418917380d857ba
|
||||||
|
size 4999813128
|
||||||
3
model-00005-of-00007.safetensors
Normal file
3
model-00005-of-00007.safetensors
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
version https://git-lfs.github.com/spec/v1
|
||||||
|
oid sha256:c94475d3722949a5f5dc093b6f4e02f238612c3bc1833f8bcb1ab6bf14130119
|
||||||
|
size 4832007496
|
||||||
3
model-00006-of-00007.safetensors
Normal file
3
model-00006-of-00007.safetensors
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
version https://git-lfs.github.com/spec/v1
|
||||||
|
oid sha256:8c4287a2d0140dc65f2c7f8740bda8423e002cc44849aa48d6b40e9e81a068a0
|
||||||
|
size 4999813120
|
||||||
3
model-00007-of-00007.safetensors
Normal file
3
model-00007-of-00007.safetensors
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
version https://git-lfs.github.com/spec/v1
|
||||||
|
oid sha256:bef3c70bf4f8b8de9399f10bec977ba4e1b7add47bf14496b292e599528d95d8
|
||||||
|
size 2571174568
|
||||||
298
model.safetensors.index.json
Normal file
298
model.safetensors.index.json
Normal file
@@ -0,0 +1,298 @@
|
|||||||
|
{
|
||||||
|
"metadata": {
|
||||||
|
"total_size": 32121077760
|
||||||
|
},
|
||||||
|
"weight_map": {
|
||||||
|
"lm_head.weight": "model-00007-of-00007.safetensors",
|
||||||
|
"model.embed_tokens.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.0.input_layernorm.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.0.mlp.down_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.0.mlp.up_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.0.post_attention_layernorm.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.0.self_attn.k_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.0.self_attn.q_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.0.self_attn.v_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.1.input_layernorm.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.1.mlp.down_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.1.mlp.gate_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.1.mlp.up_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.1.post_attention_layernorm.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.1.self_attn.k_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.1.self_attn.o_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.1.self_attn.q_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.1.self_attn.v_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.10.input_layernorm.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.10.mlp.down_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.10.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.10.mlp.up_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.10.post_attention_layernorm.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.10.self_attn.k_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.10.self_attn.o_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.10.self_attn.q_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.10.self_attn.v_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.11.input_layernorm.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.11.mlp.down_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.11.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.11.mlp.up_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.11.post_attention_layernorm.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.11.self_attn.k_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.11.self_attn.o_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.11.self_attn.q_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.11.self_attn.v_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.12.input_layernorm.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.12.mlp.down_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.12.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.12.mlp.up_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.12.post_attention_layernorm.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.12.self_attn.k_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.12.self_attn.o_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.12.self_attn.q_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.12.self_attn.v_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.13.input_layernorm.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.13.mlp.down_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.13.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.13.mlp.up_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.13.post_attention_layernorm.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.13.self_attn.k_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.13.self_attn.o_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.13.self_attn.q_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.13.self_attn.v_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.14.input_layernorm.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.14.mlp.down_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.14.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.14.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.14.post_attention_layernorm.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.14.self_attn.k_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.14.self_attn.o_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.14.self_attn.q_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.14.self_attn.v_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.15.input_layernorm.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.15.mlp.down_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.15.mlp.gate_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.15.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.15.post_attention_layernorm.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.15.self_attn.k_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.15.self_attn.o_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.15.self_attn.q_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.15.self_attn.v_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.16.input_layernorm.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.16.mlp.down_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.16.mlp.gate_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.16.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.16.post_attention_layernorm.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.16.self_attn.k_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.16.self_attn.o_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.16.self_attn.q_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.16.self_attn.v_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.17.input_layernorm.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.17.mlp.down_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.17.mlp.gate_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.17.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.17.post_attention_layernorm.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.17.self_attn.k_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.17.self_attn.o_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.17.self_attn.q_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.17.self_attn.v_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.18.input_layernorm.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.18.mlp.down_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.18.mlp.gate_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.18.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.18.post_attention_layernorm.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.18.self_attn.k_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.18.self_attn.o_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.18.self_attn.q_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.18.self_attn.v_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.19.input_layernorm.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.19.mlp.down_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.19.mlp.gate_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.19.mlp.up_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.19.post_attention_layernorm.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.19.self_attn.k_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.19.self_attn.o_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.19.self_attn.q_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.19.self_attn.v_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.2.input_layernorm.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.2.mlp.down_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.2.mlp.gate_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.2.mlp.up_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.2.self_attn.k_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.2.self_attn.v_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.20.input_layernorm.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.20.mlp.down_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.20.mlp.gate_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.20.mlp.up_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.20.post_attention_layernorm.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.20.self_attn.k_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.20.self_attn.o_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.20.self_attn.q_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.20.self_attn.v_proj.weight": "model-00004-of-00007.safetensors",
|
||||||
|
"model.layers.21.input_layernorm.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.21.mlp.down_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.21.mlp.gate_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.21.mlp.up_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.21.post_attention_layernorm.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.21.self_attn.k_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.21.self_attn.o_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.21.self_attn.q_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.21.self_attn.v_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.22.input_layernorm.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.22.mlp.down_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.22.mlp.gate_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.22.mlp.up_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.22.post_attention_layernorm.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.22.self_attn.k_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.22.self_attn.o_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.22.self_attn.q_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.22.self_attn.v_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.23.input_layernorm.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.23.mlp.down_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.23.mlp.gate_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.23.mlp.up_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.23.post_attention_layernorm.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.23.self_attn.k_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.23.self_attn.o_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.23.self_attn.q_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.23.self_attn.v_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.24.input_layernorm.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.24.mlp.down_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.24.mlp.gate_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.24.mlp.up_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.24.post_attention_layernorm.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.24.self_attn.k_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.24.self_attn.o_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.24.self_attn.q_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.24.self_attn.v_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.25.input_layernorm.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.25.mlp.down_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.25.mlp.gate_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.25.mlp.up_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.25.post_attention_layernorm.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.25.self_attn.k_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.25.self_attn.o_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.25.self_attn.q_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.25.self_attn.v_proj.weight": "model-00005-of-00007.safetensors",
|
||||||
|
"model.layers.26.input_layernorm.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.26.mlp.down_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.26.mlp.gate_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.26.mlp.up_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.26.post_attention_layernorm.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.26.self_attn.k_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.26.self_attn.o_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.26.self_attn.q_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.26.self_attn.v_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.27.input_layernorm.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.27.mlp.down_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.27.mlp.gate_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.27.mlp.up_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.27.post_attention_layernorm.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.27.self_attn.k_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.27.self_attn.o_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.27.self_attn.q_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.27.self_attn.v_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.28.input_layernorm.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.28.mlp.down_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.28.mlp.gate_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.28.mlp.up_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.28.post_attention_layernorm.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.28.self_attn.k_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.28.self_attn.o_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.28.self_attn.q_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.28.self_attn.v_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.29.input_layernorm.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.29.mlp.down_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.29.mlp.gate_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.29.mlp.up_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.29.post_attention_layernorm.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.29.self_attn.k_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.29.self_attn.o_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.29.self_attn.q_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.29.self_attn.v_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.3.input_layernorm.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.3.mlp.down_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.3.mlp.gate_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.3.mlp.up_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.3.post_attention_layernorm.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.3.self_attn.k_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.3.self_attn.q_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.3.self_attn.v_proj.weight": "model-00001-of-00007.safetensors",
|
||||||
|
"model.layers.30.input_layernorm.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.30.mlp.down_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.30.mlp.gate_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.30.mlp.up_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.30.post_attention_layernorm.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.30.self_attn.k_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.30.self_attn.o_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.30.self_attn.q_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.30.self_attn.v_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.31.input_layernorm.weight": "model-00007-of-00007.safetensors",
|
||||||
|
"model.layers.31.mlp.down_proj.weight": "model-00007-of-00007.safetensors",
|
||||||
|
"model.layers.31.mlp.gate_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.31.mlp.up_proj.weight": "model-00007-of-00007.safetensors",
|
||||||
|
"model.layers.31.post_attention_layernorm.weight": "model-00007-of-00007.safetensors",
|
||||||
|
"model.layers.31.self_attn.k_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.31.self_attn.o_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.31.self_attn.q_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.31.self_attn.v_proj.weight": "model-00006-of-00007.safetensors",
|
||||||
|
"model.layers.4.input_layernorm.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.4.mlp.down_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.4.mlp.gate_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.4.mlp.up_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.4.post_attention_layernorm.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.4.self_attn.k_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.4.self_attn.o_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.4.self_attn.q_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.4.self_attn.v_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.5.input_layernorm.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.5.mlp.down_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.5.mlp.gate_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.5.mlp.up_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.5.post_attention_layernorm.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.5.self_attn.k_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.5.self_attn.o_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.5.self_attn.q_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.5.self_attn.v_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.6.input_layernorm.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.6.mlp.down_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.6.mlp.gate_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.6.mlp.up_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.6.post_attention_layernorm.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.6.self_attn.k_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.6.self_attn.o_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.6.self_attn.q_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.6.self_attn.v_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.7.input_layernorm.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.7.mlp.down_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.7.mlp.gate_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.7.mlp.up_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.7.post_attention_layernorm.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.7.self_attn.k_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.7.self_attn.o_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.7.self_attn.q_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.7.self_attn.v_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.8.input_layernorm.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.8.mlp.down_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.8.mlp.gate_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.8.mlp.up_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.8.post_attention_layernorm.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.8.self_attn.k_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.8.self_attn.o_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.8.self_attn.q_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.8.self_attn.v_proj.weight": "model-00002-of-00007.safetensors",
|
||||||
|
"model.layers.9.input_layernorm.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.9.mlp.down_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.9.mlp.gate_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.9.mlp.up_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.9.post_attention_layernorm.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.9.self_attn.k_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.9.self_attn.o_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.9.self_attn.q_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.layers.9.self_attn.v_proj.weight": "model-00003-of-00007.safetensors",
|
||||||
|
"model.norm.weight": "model-00007-of-00007.safetensors"
|
||||||
|
}
|
||||||
|
}
|
||||||
23
special_tokens_map.json
Normal file
23
special_tokens_map.json
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
{
|
||||||
|
"bos_token": {
|
||||||
|
"content": "<|begin_of_text|>",
|
||||||
|
"lstrip": false,
|
||||||
|
"normalized": false,
|
||||||
|
"rstrip": false,
|
||||||
|
"single_word": false
|
||||||
|
},
|
||||||
|
"eos_token": {
|
||||||
|
"content": "<|end_of_text|>",
|
||||||
|
"lstrip": false,
|
||||||
|
"normalized": false,
|
||||||
|
"rstrip": false,
|
||||||
|
"single_word": false
|
||||||
|
},
|
||||||
|
"pad_token": {
|
||||||
|
"content": "<|end_of_text|>",
|
||||||
|
"lstrip": false,
|
||||||
|
"normalized": false,
|
||||||
|
"rstrip": false,
|
||||||
|
"single_word": false
|
||||||
|
}
|
||||||
|
}
|
||||||
3
tokenizer.json
Normal file
3
tokenizer.json
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
version https://git-lfs.github.com/spec/v1
|
||||||
|
oid sha256:3f9a6668d8b9e32546100cbfea17354443a8d0c2af4693e74690b65389253159
|
||||||
|
size 17210157
|
||||||
2075
tokenizer_config.json
Normal file
2075
tokenizer_config.json
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user