From bf8e997ec0f47ce3c1282e2fb9588b9b1249b562 Mon Sep 17 00:00:00 2001
From: ModelHub XC
Date: Sun, 3 May 2026 02:51:42 +0800
Subject: [PATCH] Initialize the project; model provided by the ModelHub XC
 community
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Model: mncai/MOIS-AWQ-20240319
Source: Original Platform
---
 .gitattributes                   |   51 ++
 README.md                        |  201 ++++++
 config.json                      |   41 ++
 configuration.json               |    1 +
 configuration_orion.py           |   82 +++
 generation_config.json           |   13 +
 generation_utils.py              |   56 ++
 model-00001-of-00002.safetensors |    3 +
 model-00002-of-00002.safetensors |    3 +
 model.safetensors.index.json     | 1011 +++++++++++++++++++++++++++
 modeling_orion.py                | 1117 ++++++++++++++++++++++++++++++
 special_tokens_map.json          |   30 +
 tokenization_orion.py            |  269 +++
 tokenizer.model                  |    3 +
 tokenizer_config.json            |   45 ++
 15 files changed, 2926 insertions(+)
 create mode 100644 .gitattributes
 create mode 100644 README.md
 create mode 100644 config.json
 create mode 100644 configuration.json
 create mode 100644 configuration_orion.py
 create mode 100644 generation_config.json
 create mode 100644 generation_utils.py
 create mode 100644 model-00001-of-00002.safetensors
 create mode 100644 model-00002-of-00002.safetensors
 create mode 100644 model.safetensors.index.json
 create mode 100644 modeling_orion.py
 create mode 100644 special_tokens_map.json
 create mode 100644 tokenization_orion.py
 create mode 100644 tokenizer.model
 create mode 100644 tokenizer_config.json

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..04a2cc2
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,51 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*.tfevents* filter=lfs diff=lfs merge=lfs -text
+*.db* filter=lfs diff=lfs merge=lfs -text
+*.ark* filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
+
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.gguf* filter=lfs diff=lfs merge=lfs -text
+*.ggml filter=lfs diff=lfs merge=lfs -text
+*.llamafile* filter=lfs diff=lfs merge=lfs -text
+*.pt2 filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text + +model-00001-of-00002.safetensors filter=lfs diff=lfs merge=lfs -text +model-00002-of-00002.safetensors filter=lfs diff=lfs merge=lfs -text +tokenizer.model filter=lfs diff=lfs merge=lfs -text \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..cacd7c3 --- /dev/null +++ b/README.md @@ -0,0 +1,201 @@ +--- +library_name: transformers +tags: [] +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + +This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
+ +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + diff --git a/config.json b/config.json new file mode 100644 index 0000000..3502044 --- /dev/null +++ b/config.json @@ -0,0 +1,41 @@ +{ + "_name_or_path": "/opt/storage/final_model/Orion-AWQ-2", + "architectures": [ + "OrionForCausalLM" + ], + "attention_bias": false, + "auto_map": { + "AutoConfig": "configuration_orion.OrionConfig", + "AutoModelForCausalLM": "modeling_orion.OrionForCausalLM" + }, + "bos_token_id": 1, + "eos_token_id": 2, + "hidden_act": "silu", + "hidden_size": 5120, + "initializer_range": 0.02, + "intermediate_size": 15360, + "max_position_embeddings": 4096, + "max_sequence_length": 4096, + "model_type": "orion", + "num_attention_heads": 40, + "num_hidden_layers": 40, + "num_key_value_heads": 40, + "pad_token_id": 0, + "pretraining_tp": 1, + "quantization_config": { + "bits": 4, + "group_size": 128, + "modules_to_not_convert": null, + "quant_method": "awq", + "version": "gemm", + "zero_point": true + }, + "rms_norm_eps": 1e-05, + "rope_scaling": null, + "rope_theta": 10000.0, + "tie_word_embeddings": false, + "torch_dtype": "float16", + "transformers_version": "4.37.0", + "use_cache": false, + "vocab_size": 84608 +} diff --git a/configuration.json b/configuration.json new file mode 100644 index 0000000..bbeeda1 --- /dev/null +++ b/configuration.json @@ -0,0 +1 @@ +{"framework": "pytorch", "task": "text-generation", "allow_remote": true} \ No newline at end of file diff --git a/configuration_orion.py b/configuration_orion.py new file mode 100644 index 0000000..e42a5a4 --- /dev/null +++ b/configuration_orion.py @@ -0,0 +1,82 @@ +# Copyright (c) 2024, OrionStar Inc. All rights reserved. 
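+
+# A minimal loading sketch (an illustration added here, not OrionStar code;
+# "." stands in for the local checkout path). config.json's `auto_map` points
+# AutoConfig / AutoModelForCausalLM at the classes in this repo, so loading
+# requires `trust_remote_code=True`, and the AWQ 4-bit checkpoint (see
+# `quantization_config` in config.json) additionally needs the `autoawq`
+# package installed:
+#
+#     from transformers import AutoModelForCausalLM, AutoTokenizer
+#     tokenizer = AutoTokenizer.from_pretrained(".", trust_remote_code=True)
+#     model = AutoModelForCausalLM.from_pretrained(
+#         ".", trust_remote_code=True, torch_dtype="auto", device_map="auto"
+#     )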
+
+from transformers import PretrainedConfig
+
+class OrionConfig(PretrainedConfig):
+    model_type = "orion"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=84608,
+        hidden_size=4096,
+        intermediate_size=15360,
+        num_hidden_layers=40,
+        num_attention_heads=40,
+        num_key_value_heads=40,
+        hidden_act="silu",
+        max_position_embeddings=4096,
+        initializer_range=0.02,
+        rms_norm_eps=1e-5,
+        use_cache=True,
+        pad_token_id=None,
+        bos_token_id=1,
+        eos_token_id=2,
+        pretraining_tp=1,
+        tie_word_embeddings=False,
+        rope_theta=10000.0,
+        rope_scaling=None,
+        attention_bias=False,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.pretraining_tp = pretraining_tp
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.rope_scaling = rope_scaling
+        self._rope_scaling_validation()
+        self.attention_bias = attention_bias
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
+
+    def _rope_scaling_validation(self):
+        """
+        Validate the `rope_scaling` configuration.
+        """
+        if self.rope_scaling is None:
+            return
+
+        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+            raise ValueError(
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                f"got {self.rope_scaling}"
+            )
+        rope_scaling_type = self.rope_scaling.get("type", None)
+        rope_scaling_factor = self.rope_scaling.get("factor", None)
+        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+            raise ValueError(
+                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+            )
+        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
+
diff --git a/generation_config.json b/generation_config.json
new file mode 100644
index 0000000..6ae156d
--- /dev/null
+++ b/generation_config.json
@@ -0,0 +1,13 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "do_sample": true,
+  "eos_token_id": 2,
+  "max_new_tokens": 1024,
+  "pad_token_id": 0,
+  "repetition_penalty": 1.05,
+  "temperature": 0.3,
+  "top_k": 5,
+  "top_p": 0.9,
+  "transformers_version": "4.37.0"
+}
diff --git a/generation_utils.py b/generation_utils.py
new file mode 100644
index 0000000..4e2f3e8
--- /dev/null
+++ b/generation_utils.py
@@ -0,0 +1,56 @@
+from typing import List
+from queue import Queue
+
+# build chat input prompt
+def build_chat_input(tokenizer, messages: List[dict]):
+    # chat format:
+    # single-turn: <s>Human: Hello!\n\nAssistant: </s>
+    # multi-turn: <s>Human: Hello!\n\nAssistant: </s>Hi!</s>Human: How are you?\n\nAssistant: </s>I'm fine</s>
+
+    prompt = "<s>"
+    for msg in messages:
+        role = msg["role"]
+        message = msg["content"]
+        if message is None:
+            continue
+        if role == "user":
+            prompt += "Human: " + message + "\n\nAssistant: </s>"
+        if role == "assistant":
+            prompt += message + "</s>"
+
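+    # Worked example (illustrative comment, not in the original file):
+    # messages = [{"role": "user", "content": "Hello!"}] assembles
+    # prompt == "<s>Human: Hello!\n\nAssistant: </s>", which is encoded
+    # to token ids below.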
input_tokens = tokenizer.encode(prompt) + return input_tokens + + +class TextIterStreamer: + def __init__(self, tokenizer, skip_prompt=False, skip_special_tokens=False): + self.tokenizer = tokenizer + self.skip_prompt = skip_prompt + self.skip_special_tokens = skip_special_tokens + self.tokens = [] + self.text_queue = Queue() + self.next_tokens_are_prompt = True + + def put(self, value): + if self.skip_prompt and self.next_tokens_are_prompt: + self.next_tokens_are_prompt = False + else: + if len(value.shape) > 1: + value = value[0] + self.tokens.extend(value.tolist()) + self.text_queue.put( + self.tokenizer.decode(self.tokens, skip_special_tokens=self.skip_special_tokens)) + + def end(self): + self.text_queue.put(None) + + def __iter__(self): + return self + + def __next__(self): + value = self.text_queue.get() + if value is None: + raise StopIteration() + else: + return value + diff --git a/model-00001-of-00002.safetensors b/model-00001-of-00002.safetensors new file mode 100644 index 0000000..f818332 --- /dev/null +++ b/model-00001-of-00002.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b951cdf59fd01e9bb5799d237e2104a888c7022efcc54eb14a1f67e5a831db6 +size 4994012400 diff --git a/model-00002-of-00002.safetensors b/model-00002-of-00002.safetensors new file mode 100644 index 0000000..c3187a5 --- /dev/null +++ b/model-00002-of-00002.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c23ad0e62c04d60f32bbde4caf2e4f0abab78e4444bda2bbb36a5c9d2a5cf3d9 +size 3822516432 diff --git a/model.safetensors.index.json b/model.safetensors.index.json new file mode 100644 index 0000000..5442173 --- /dev/null +++ b/model.safetensors.index.json @@ -0,0 +1,1011 @@ +{ + "metadata": { + "total_size": 8816414720 + }, + "weight_map": { + "lm_head.weight": "model-00002-of-00002.safetensors", + "model.embed_tokens.weight": "model-00001-of-00002.safetensors", + "model.layers.0.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.0.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.0.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.0.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.0.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.0.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.0.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.0.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.0.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.0.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.0.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + 
"model.layers.0.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.1.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.1.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.1.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.1.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.1.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.1.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.1.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.1.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.1.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.1.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.1.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.10.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.10.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.10.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.10.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.10.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.10.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.10.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.10.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.10.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.10.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.10.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.k_proj.qzeros": 
"model-00001-of-00002.safetensors", + "model.layers.10.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.11.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.11.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.11.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.11.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.11.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.11.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.11.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.11.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.11.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.11.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.11.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.12.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.12.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.12.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.12.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.12.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.12.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.12.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.12.mlp.up_proj.qweight": 
"model-00001-of-00002.safetensors", + "model.layers.12.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.12.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.12.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.13.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.13.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.13.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.13.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.13.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.13.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.13.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.13.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.13.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.13.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.13.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.14.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.14.mlp.down_proj.qweight": 
"model-00001-of-00002.safetensors", + "model.layers.14.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.14.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.14.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.14.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.14.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.14.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.14.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.14.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.14.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.15.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.15.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.15.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.15.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.15.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.15.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.15.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.15.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.15.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.15.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.15.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.q_proj.scales": 
"model-00001-of-00002.safetensors", + "model.layers.15.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.16.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.16.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.16.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.16.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.16.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.16.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.16.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.16.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.16.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.16.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.16.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.17.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.17.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.17.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.17.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.17.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.17.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.17.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.17.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.17.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.17.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.17.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.k_proj.scales": 
"model-00001-of-00002.safetensors", + "model.layers.17.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.18.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.18.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.18.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.18.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.18.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.18.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.18.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.18.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.18.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.18.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.18.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.19.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.19.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.19.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.19.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.19.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.19.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.19.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.19.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.19.mlp.up_proj.qzeros": 
"model-00001-of-00002.safetensors", + "model.layers.19.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.19.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.2.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.2.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.2.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.2.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.2.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.2.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.2.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.2.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.2.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.2.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.2.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.20.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.20.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.20.mlp.down_proj.qzeros": 
"model-00001-of-00002.safetensors", + "model.layers.20.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.20.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.20.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.20.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.20.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.20.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.20.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.20.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.21.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.21.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.21.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.21.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.21.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.21.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.21.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.21.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.21.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.21.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.21.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.21.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.21.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.21.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.21.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.21.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.21.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.21.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.21.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.21.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.21.self_attn.v_proj.qweight": 
"model-00001-of-00002.safetensors", + "model.layers.21.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.21.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.22.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.22.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.22.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.22.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.22.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.22.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.22.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.22.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.22.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.22.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.22.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.23.input_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.23.mlp.down_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.23.mlp.down_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.23.mlp.down_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.23.mlp.gate_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.23.mlp.gate_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.23.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.23.mlp.up_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.23.mlp.up_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.23.mlp.up_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.23.post_attention_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.23.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.23.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.23.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.23.self_attn.o_proj.qweight": 
"model-00001-of-00002.safetensors", + "model.layers.23.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.23.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.23.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.23.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.23.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.23.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.23.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.23.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.24.input_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.24.mlp.down_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.24.mlp.down_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.24.mlp.down_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.24.mlp.gate_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.24.mlp.gate_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.24.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.24.mlp.up_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.24.mlp.up_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.24.mlp.up_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.24.post_attention_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.24.self_attn.k_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.24.self_attn.k_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.24.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.24.self_attn.o_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.24.self_attn.o_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.24.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.24.self_attn.q_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.24.self_attn.q_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.24.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.24.self_attn.v_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.24.self_attn.v_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.24.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.25.input_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.25.mlp.down_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.25.mlp.down_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.25.mlp.down_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.25.mlp.gate_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.25.mlp.gate_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.25.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.25.mlp.up_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.25.mlp.up_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.25.mlp.up_proj.scales": "model-00002-of-00002.safetensors", + 
"model.layers.25.post_attention_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.25.self_attn.k_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.25.self_attn.k_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.25.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.25.self_attn.o_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.25.self_attn.o_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.25.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.25.self_attn.q_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.25.self_attn.q_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.25.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.25.self_attn.v_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.25.self_attn.v_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.25.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.26.input_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.26.mlp.down_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.26.mlp.down_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.26.mlp.down_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.26.mlp.gate_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.26.mlp.gate_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.26.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.26.mlp.up_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.26.mlp.up_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.26.mlp.up_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.26.post_attention_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.26.self_attn.k_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.26.self_attn.k_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.26.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.26.self_attn.o_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.26.self_attn.o_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.26.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.26.self_attn.q_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.26.self_attn.q_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.26.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.26.self_attn.v_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.26.self_attn.v_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.26.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.27.input_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.27.mlp.down_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.27.mlp.down_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.27.mlp.down_proj.scales": "model-00002-of-00002.safetensors", + 
"model.layers.27.mlp.gate_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.27.mlp.gate_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.27.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.27.mlp.up_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.27.mlp.up_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.27.mlp.up_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.27.post_attention_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.27.self_attn.k_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.27.self_attn.k_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.27.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.27.self_attn.o_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.27.self_attn.o_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.27.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.27.self_attn.q_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.27.self_attn.q_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.27.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.27.self_attn.v_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.27.self_attn.v_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.27.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.28.input_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.28.mlp.down_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.28.mlp.down_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.28.mlp.down_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.28.mlp.gate_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.28.mlp.gate_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.28.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.28.mlp.up_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.28.mlp.up_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.28.mlp.up_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.28.post_attention_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.28.self_attn.k_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.28.self_attn.k_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.28.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.28.self_attn.o_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.28.self_attn.o_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.28.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.28.self_attn.q_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.28.self_attn.q_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.28.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.28.self_attn.v_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.28.self_attn.v_proj.qzeros": "model-00002-of-00002.safetensors", + 
"model.layers.28.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.29.input_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.29.mlp.down_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.29.mlp.down_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.29.mlp.down_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.29.mlp.gate_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.29.mlp.gate_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.29.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.29.mlp.up_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.29.mlp.up_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.29.mlp.up_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.29.post_attention_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.29.self_attn.k_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.29.self_attn.k_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.29.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.29.self_attn.o_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.29.self_attn.o_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.29.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.29.self_attn.q_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.29.self_attn.q_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.29.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.29.self_attn.v_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.29.self_attn.v_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.29.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.3.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.3.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.3.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.3.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.3.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.3.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.3.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.3.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.3.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.3.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.3.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.3.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.3.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.3.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.3.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.3.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.3.self_attn.o_proj.scales": 
"model-00001-of-00002.safetensors", + "model.layers.3.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.3.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.3.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.3.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.3.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.3.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.30.input_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.30.mlp.down_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.30.mlp.down_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.30.mlp.down_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.30.mlp.gate_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.30.mlp.gate_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.30.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.30.mlp.up_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.30.mlp.up_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.30.mlp.up_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.30.post_attention_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.30.self_attn.k_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.30.self_attn.k_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.30.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.30.self_attn.o_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.30.self_attn.o_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.30.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.30.self_attn.q_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.30.self_attn.q_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.30.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.30.self_attn.v_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.30.self_attn.v_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.30.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.31.input_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.31.mlp.down_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.31.mlp.down_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.31.mlp.down_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.31.mlp.gate_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.31.mlp.gate_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.31.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.31.mlp.up_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.31.mlp.up_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.31.mlp.up_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.31.post_attention_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.31.post_attention_layernorm.weight": 
"model-00002-of-00002.safetensors", + "model.layers.31.self_attn.k_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.31.self_attn.k_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.31.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.31.self_attn.o_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.31.self_attn.o_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.31.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.31.self_attn.q_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.31.self_attn.q_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.31.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.31.self_attn.v_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.31.self_attn.v_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.31.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.32.input_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.32.input_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.32.mlp.down_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.32.mlp.down_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.32.mlp.down_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.32.mlp.gate_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.32.mlp.gate_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.32.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.32.mlp.up_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.32.mlp.up_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.32.mlp.up_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.32.post_attention_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.32.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.32.self_attn.k_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.32.self_attn.k_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.32.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.32.self_attn.o_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.32.self_attn.o_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.32.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.32.self_attn.q_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.32.self_attn.q_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.32.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.32.self_attn.v_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.32.self_attn.v_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.32.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.33.input_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.33.input_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.33.mlp.down_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.33.mlp.down_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.33.mlp.down_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.33.mlp.gate_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.33.mlp.gate_proj.qzeros": 
"model-00002-of-00002.safetensors", + "model.layers.33.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.33.mlp.up_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.33.mlp.up_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.33.mlp.up_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.33.post_attention_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.33.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.33.self_attn.k_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.33.self_attn.k_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.33.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.33.self_attn.o_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.33.self_attn.o_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.33.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.33.self_attn.q_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.33.self_attn.q_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.33.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.33.self_attn.v_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.33.self_attn.v_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.33.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.34.input_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.34.input_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.34.mlp.down_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.34.mlp.down_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.34.mlp.down_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.34.mlp.gate_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.34.mlp.gate_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.34.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.34.mlp.up_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.34.mlp.up_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.34.mlp.up_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.34.post_attention_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.34.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.34.self_attn.k_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.34.self_attn.k_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.34.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.34.self_attn.o_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.34.self_attn.o_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.34.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.34.self_attn.q_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.34.self_attn.q_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.34.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.34.self_attn.v_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.34.self_attn.v_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.34.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.35.input_layernorm.bias": 
"model-00002-of-00002.safetensors", + "model.layers.35.input_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.35.mlp.down_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.35.mlp.down_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.35.mlp.down_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.35.mlp.gate_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.35.mlp.gate_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.35.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.35.mlp.up_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.35.mlp.up_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.35.mlp.up_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.35.post_attention_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.35.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.35.self_attn.k_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.35.self_attn.k_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.35.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.35.self_attn.o_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.35.self_attn.o_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.35.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.35.self_attn.q_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.35.self_attn.q_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.35.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.35.self_attn.v_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.35.self_attn.v_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.35.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.36.input_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.36.input_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.36.mlp.down_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.36.mlp.down_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.36.mlp.down_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.36.mlp.gate_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.36.mlp.gate_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.36.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.36.mlp.up_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.36.mlp.up_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.36.mlp.up_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.36.post_attention_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.36.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.36.self_attn.k_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.36.self_attn.k_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.36.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.36.self_attn.o_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.36.self_attn.o_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.36.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.36.self_attn.q_proj.qweight": 
"model-00002-of-00002.safetensors", + "model.layers.36.self_attn.q_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.36.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.36.self_attn.v_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.36.self_attn.v_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.36.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.37.input_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.37.input_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.37.mlp.down_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.37.mlp.down_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.37.mlp.down_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.37.mlp.gate_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.37.mlp.gate_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.37.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.37.mlp.up_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.37.mlp.up_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.37.mlp.up_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.37.post_attention_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.37.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.37.self_attn.k_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.37.self_attn.k_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.37.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.37.self_attn.o_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.37.self_attn.o_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.37.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.37.self_attn.q_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.37.self_attn.q_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.37.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.37.self_attn.v_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.37.self_attn.v_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.37.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.38.input_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.38.input_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.38.mlp.down_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.38.mlp.down_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.38.mlp.down_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.38.mlp.gate_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.38.mlp.gate_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.38.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.38.mlp.up_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.38.mlp.up_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.38.mlp.up_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.38.post_attention_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.38.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.38.self_attn.k_proj.qweight": 
"model-00002-of-00002.safetensors", + "model.layers.38.self_attn.k_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.38.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.38.self_attn.o_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.38.self_attn.o_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.38.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.38.self_attn.q_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.38.self_attn.q_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.38.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.38.self_attn.v_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.38.self_attn.v_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.38.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.39.input_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.39.input_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.39.mlp.down_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.39.mlp.down_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.39.mlp.down_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.39.mlp.gate_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.39.mlp.gate_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.39.mlp.gate_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.39.mlp.up_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.39.mlp.up_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.39.mlp.up_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.39.post_attention_layernorm.bias": "model-00002-of-00002.safetensors", + "model.layers.39.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", + "model.layers.39.self_attn.k_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.39.self_attn.k_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.39.self_attn.k_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.39.self_attn.o_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.39.self_attn.o_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.39.self_attn.o_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.39.self_attn.q_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.39.self_attn.q_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.39.self_attn.q_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.39.self_attn.v_proj.qweight": "model-00002-of-00002.safetensors", + "model.layers.39.self_attn.v_proj.qzeros": "model-00002-of-00002.safetensors", + "model.layers.39.self_attn.v_proj.scales": "model-00002-of-00002.safetensors", + "model.layers.4.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.4.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.4.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.4.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.4.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.4.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.4.mlp.gate_proj.scales": 
"model-00001-of-00002.safetensors", + "model.layers.4.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.4.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.4.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.4.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.5.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.5.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.5.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.5.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.5.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.5.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.5.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.5.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.5.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.5.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.5.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.6.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors", + 
"model.layers.6.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.6.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.6.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.6.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.6.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.6.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.6.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.6.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.6.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.6.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.7.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.7.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.7.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.7.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.7.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.7.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.7.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.7.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.7.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.7.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.7.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.7.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.7.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.7.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.7.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.7.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.7.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.7.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.7.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.7.self_attn.q_proj.scales": 
"model-00001-of-00002.safetensors", + "model.layers.7.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.7.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.7.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.8.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.8.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.8.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.8.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.8.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.8.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.8.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.8.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.8.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.8.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.8.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.9.input_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.9.mlp.down_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.9.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.9.mlp.down_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.9.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.9.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.9.mlp.gate_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.9.mlp.up_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.9.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.9.mlp.up_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.9.post_attention_layernorm.bias": "model-00001-of-00002.safetensors", + "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.k_proj.scales": "model-00001-of-00002.safetensors", + 
"model.layers.9.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.o_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.q_proj.scales": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.v_proj.scales": "model-00001-of-00002.safetensors", + "model.norm.bias": "model-00002-of-00002.safetensors", + "model.norm.weight": "model-00002-of-00002.safetensors" + } +} diff --git a/modeling_orion.py b/modeling_orion.py new file mode 100644 index 0000000..1ff79d4 --- /dev/null +++ b/modeling_orion.py @@ -0,0 +1,1117 @@ +# Copyright 2024 OrionStar Inc. team. All rights reserved. +# Copied and adapted from https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py + +from transformers import AutoConfig, AutoModel + +from .configuration_orion import OrionConfig + +import numbers +import importlib +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from torch.nn.parameter import Parameter +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss +from torch.nn import init + +from transformers.activations import ACT2FN +from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast +from transformers.modeling_utils import PreTrainedModel +from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS +from transformers.utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_available, + logging, + replace_return_docstrings, +) +from .generation_utils import build_chat_input, TextIterStreamer +from transformers.generation.utils import GenerationConfig +from threading import Thread + +if is_flash_attn_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "OrionConfig" + +def _get_unpad_data(padding_mask): + seqlens_in_batch = padding_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(padding_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +# Copied from transformers.models.bart.modeling_bart._make_causal_mask +def _make_causal_mask( + input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 +): + """ + Make causal mask used for bi-directional self-attention. 
+ """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) + mask_cond = torch.arange(mask.size(-1), device=device) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + +# Copied from transformers.models.bart.modeling_bart._expand_mask +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + +class OrionRotaryEmbedding(nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + # Build here to make `torch.jit.trace` work. + self._set_cos_sin_cache( + seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() + ) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) + + return ( + self.cos_cached[:seq_len].to(dtype=x.dtype), + self.sin_cached[:seq_len].to(dtype=x.dtype), + ) + + +class OrionLinearScalingRotaryEmbedding(OrionRotaryEmbedding): + """OrionRotaryEmbedding extended with linear scaling. 
Credits to the Reddit user /u/kaiokendev""" + + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base, device) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + t = t / self.scaling_factor + + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + +class OrionDynamicNTKScalingRotaryEmbedding(OrionRotaryEmbedding): + """OrionRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla""" + + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base, device) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + + if seq_len > self.max_position_embeddings: + base = self.base * ( + (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1) + ) ** (self.dim / (self.dim - 2)) + inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +# Copied from transformers.models.gpt_neox.modeling_gpt_neox.apply_rotary_pos_emb +def apply_rotary_pos_emb(q, k, cos, sin, position_ids): + cos = cos[position_ids].unsqueeze(1) # [seq_len, dim] -> [batch_size, 1, seq_len, head_dim] + sin = sin[position_ids].unsqueeze(1) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +class OrionMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, x): + if self.config.pretraining_tp > 1: + slice = self.intermediate_size // self.config.pretraining_tp + gate_proj_slices = self.gate_proj.weight.split(slice, dim=0) + up_proj_slices = self.up_proj.weight.split(slice, dim=0) + down_proj_slices = self.down_proj.weight.split(slice, dim=1) + + gate_proj = torch.cat( + [F.linear(x, gate_proj_slices[i]) for i in 
range(self.config.pretraining_tp)], dim=-1 + ) + up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1) + + intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2) + down_proj = [ + F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp) + ] + down_proj = sum(down_proj) + else: + down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + + return down_proj + + +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +class OrionAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: OrionConfig): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.max_position_embeddings = config.max_position_embeddings + self.rope_theta = config.rope_theta + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." 
+ ) + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) + self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) + self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias) + self._init_rope() + + def _init_rope(self): + if self.config.rope_scaling is None: + self.rotary_emb = OrionRotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + else: + scaling_type = self.config.rope_scaling["type"] + scaling_factor = self.config.rope_scaling["factor"] + if scaling_type == "linear": + self.rotary_emb = OrionLinearScalingRotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + scaling_factor=scaling_factor, + base=self.rope_theta, + ) + elif scaling_type == "dynamic": + self.rotary_emb = OrionDynamicNTKScalingRotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + scaling_factor=scaling_factor, + base=self.rope_theta, + ) + else: + raise ValueError(f"Unknown RoPE scaling type {scaling_type}") + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + padding_mask: Optional[torch.LongTensor] = None, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + if self.config.pretraining_tp > 1: + key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp + query_slices = self.q_proj.weight.split( + (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0 + ) + key_slices = self.k_proj.weight.split(key_value_slicing, dim=0) + value_slices = self.v_proj.weight.split(key_value_slicing, dim=0) + + query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)] + query_states = torch.cat(query_states, dim=-1) + + key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)] + key_states = torch.cat(key_states, dim=-1) + + value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)] + value_states = torch.cat(value_states, dim=-1) + + else: + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + # reuse k, v, self_attention + key_states = 
torch.cat([past_key_value[0], key_states], dim=2)
+            value_states = torch.cat([past_key_value[1], value_states], dim=2)
+
+        past_key_value = (key_states, value_states) if use_cache else None
+
+        key_states = repeat_kv(key_states, self.num_key_value_groups)
+        value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+
+        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
+            raise ValueError(
+                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
+                f" {attn_weights.size()}"
+            )
+
+        if attention_mask is not None:
+            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+                raise ValueError(
+                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+                )
+            attn_weights = attn_weights + attention_mask
+
+        # upcast attention to fp32
+        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+        attn_output = torch.matmul(attn_weights, value_states)
+
+        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+            raise ValueError(
+                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+                f" {attn_output.size()}"
+            )
+
+        attn_output = attn_output.transpose(1, 2).contiguous()
+
+        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+        if self.config.pretraining_tp > 1:
+            attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
+            o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
+            attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])
+        else:
+            attn_output = self.o_proj(attn_output)
+
+        if not output_attentions:
+            attn_weights = None
+
+        return attn_output, attn_weights, past_key_value
+
+
+class OrionFlashAttention2(OrionAttention):
+    """
+    Orion flash attention module. This module inherits from `OrionAttention`, as the weights of the module stay
+    untouched. The only required change is in the forward pass, which needs to correctly call the public API of
+    flash attention and deal with padding tokens in case the input contains any of them.
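+    This path is only taken when the config sets `_flash_attn_2_enabled`; see `OrionDecoderLayer.__init__` below.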
+ """ + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + padding_mask: Optional[torch.LongTensor] = None, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + # OrionFlashAttention2 attention does not support output_attentions + output_attentions = False + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + # Flash attention requires the input to have the shape + # batch_size x seq_length x head_dime x hidden_dim + # therefore we just need to keep the original shape + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + past_key_value = (key_states, value_states) if use_cache else None + + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + # TODO: llama does not have dropout in the config?? + # It is recommended to use dropout with FA according to the docs + # when training. + dropout_rate = 0.0 # if not self.training else self.attn_dropout + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in float16 just to be sure everything works as expected. + # This might slowdown training & inference so it is recommended to not cast the LayerNorms + # in fp32. (LlamaRMSNorm handles it correctly) + input_dtype = query_states.dtype + if input_dtype == torch.float32: + logger.warning_once( + "The input hidden states seems to be silently casted in float32, this might be related to" + " the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + " float16." 
+            )
+
+            query_states = query_states.to(torch.float16)
+            key_states = key_states.to(torch.float16)
+            value_states = value_states.to(torch.float16)
+
+        attn_output = self._flash_attention_forward(
+            query_states, key_states, value_states, padding_mask, q_len, dropout=dropout_rate
+        )
+
+        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
+        attn_output = self.o_proj(attn_output)
+
+        if not output_attentions:
+            attn_weights = None
+
+        return attn_output, attn_weights, past_key_value
+
+    def _flash_attention_forward(
+        self, query_states, key_states, value_states, padding_mask, query_length, dropout=0.0, softmax_scale=None
+    ):
+        """
+        Calls the forward method of Flash Attention. If the input hidden states contain at least one padding token,
+        the input is first unpadded, the attention scores are computed, and the output is then padded back to the
+        original shape.
+
+        Args:
+            query_states (`torch.Tensor`):
+                Input query states to be passed to Flash Attention API
+            key_states (`torch.Tensor`):
+                Input key states to be passed to Flash Attention API
+            value_states (`torch.Tensor`):
+                Input value states to be passed to Flash Attention API
+            padding_mask (`torch.Tensor`):
+                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+                position of padding tokens and 1 for the position of non-padding tokens.
+            dropout (`float`, *optional*):
+                Attention dropout probability
+            softmax_scale (`float`, *optional*):
+                The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)
+        """
+        # Contains at least one padding token in the sequence
+        if padding_mask is not None:
+            batch_size = query_states.shape[0]
+            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+                query_states, key_states, value_states, padding_mask, query_length
+            )
+
+            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+
+            attn_output_unpad = flash_attn_varlen_func(
+                query_states,
+                key_states,
+                value_states,
+                cu_seqlens_q=cu_seqlens_q,
+                cu_seqlens_k=cu_seqlens_k,
+                max_seqlen_q=max_seqlen_in_batch_q,
+                max_seqlen_k=max_seqlen_in_batch_k,
+                dropout_p=dropout,
+                softmax_scale=softmax_scale,
+                causal=True,
+            )
+
+            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+        else:
+            attn_output = flash_attn_func(
+                query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=True
+            )
+
+        return attn_output
+
+    def _upad_input(self, query_layer, key_layer, value_layer, padding_mask, query_length):
+        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(padding_mask)
+        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
+
+        key_layer = index_first_axis(
+            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+        )
+        value_layer = index_first_axis(
+            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+        )
+        if query_length == kv_seq_len:
+            query_layer = index_first_axis(
+                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
+            )
+            cu_seqlens_q = cu_seqlens_k
+            max_seqlen_in_batch_q = max_seqlen_in_batch_k
+            indices_q = indices_k
+        elif query_length == 1:
+            max_seqlen_in_batch_q = 1
+            cu_seqlens_q = torch.arange(
+                batch_size + 1, dtype=torch.int32, device=query_layer.device
+            )  # There is a memcpy here, that is very bad.
+            indices_q = cu_seqlens_q[:-1]
+            query_layer = query_layer.squeeze(1)
+        else:
+            # The -q_len: slice assumes left padding.
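+            # For example, with padding_mask [[0, 0, 1, 1]] (two pad tokens on the left) and query_length == 2,
+            # the kept columns [[1, 1]] line up with the two real query tokens at the end of the sequence;
+            # with right padding this correspondence would not hold.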
+ padding_mask = padding_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, padding_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +class OrionDecoderLayer(nn.Module): + def __init__(self, config: OrionConfig): + super().__init__() + self.hidden_size = config.hidden_size + self.self_attn = ( + OrionAttention(config=config) + if not getattr(config, "_flash_attn_2_enabled", False) + else OrionFlashAttention2(config=config) + ) + self.mlp = OrionMLP(config) + self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps) + self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + padding_mask: Optional[torch.LongTensor] = None, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). 
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + padding_mask=padding_mask, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + +class OrionPreTrainedModel(PreTrainedModel): + config_class = OrionConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["OrionDecoderLayer"] + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, OrionModel): + module.gradient_checkpointing = value + +class OrionModel(OrionPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`OrionDecoderLayer`] + + Args: + config: OrionConfig + """ + + def __init__(self, config: OrionConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList([OrionDecoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.norm = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask + def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, + inputs_embeds.dtype, + device=inputs_embeds.device, + past_key_values_length=past_key_values_length, + ) + + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + batch_size, seq_length = input_ids.shape + elif inputs_embeds is not None: + batch_size, seq_length, _ = inputs_embeds.shape + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + seq_length_with_past = seq_length + past_key_values_length = 0 + + if past_key_values is not None: + past_key_values_length = past_key_values[0][0].shape[2] + seq_length_with_past = seq_length_with_past + past_key_values_length + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0) + + if inputs_embeds is None: + inputs_embeds = 
self.embed_tokens(input_ids) + # embed positions + if attention_mask is None: + attention_mask = torch.ones( + (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device + ) + padding_mask = None + else: + if 0 in attention_mask: + padding_mask = attention_mask + else: + padding_mask = None + + attention_mask = self._prepare_decoder_attention_mask( + attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length + ) + + hidden_states = inputs_embeds + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = () if use_cache else None + + for idx, decoder_layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + # None for past_key_value + return module(*inputs, past_key_value, output_attentions, padding_mask=padding_mask) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(decoder_layer), hidden_states, attention_mask, position_ids + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + padding_mask=padding_mask, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +class OrionForCausalLM(OrionPreTrainedModel): + model_type = "orion" + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.model = OrionModel(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: 
Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, OrionForCausalLM + + >>> model = OrionForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) + >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + if self.config.pretraining_tp > 1: + lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0) + logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)] + logits = torch.cat(logits, dim=-1) + else: + logits = self.lm_head(hidden_states) + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def chat(self, tokenizer, messages: List[dict], streaming=False,generation_config: Optional[GenerationConfig]=None): + generation_config = generation_config or self.generation_config + input_tokens = build_chat_input(tokenizer,messages) + input_ids = 
torch.LongTensor([input_tokens]).to(self.device) + + if streaming: + streamer = TextIterStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) + Thread(target=self.generate, kwargs=dict( + inputs=input_ids, streamer=streamer, + generation_config=generation_config, + )).start() + return streamer + else: + outputs = self.generate(input_ids, generation_config=generation_config) + response = tokenizer.decode(outputs[0][len(input_ids[0]):], skip_special_tokens=True) + return response + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs + ): + if past_key_values: + input_ids = input_ids[:, -1:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -1].unsqueeze(-1) + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + } + ) + return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + +class OrionForSequenceClassification(OrionPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = OrionModel(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + logits.device + ) + else: + sequence_lengths = -1 + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + diff --git a/special_tokens_map.json b/special_tokens_map.json new file mode 100644 index 0000000..5819ea2 --- /dev/null +++ b/special_tokens_map.json @@ -0,0 +1,30 @@ +{ + "bos_token": { + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "pad_token": { + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "unk_token": { + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + } +} diff --git a/tokenization_orion.py b/tokenization_orion.py new file mode 100644 index 0000000..6ef4197 --- /dev/null +++ b/tokenization_orion.py @@ -0,0 +1,269 @@ +# Copyright (c) 2024, OrionStar Inc. All rights reserved. 
+ +import os +from shutil import copyfile +from typing import Any, Dict, List, Optional, Tuple +import re + +import sentencepiece as spm +from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer + + +VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"} + +PRETRAINED_VOCAB_FILES_MAP = { + "vocab_file": {}, + "tokenizer_file": {}, +} +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {} + + +class OrionTokenizer(PreTrainedTokenizer): + """ + Construct a Orion tokenizer. Based on byte-level Byte-Pair-Encoding. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file, + unk_token="", + bos_token="", + eos_token="", + pad_token=None, + sp_model_kwargs: Optional[Dict[str, Any]] = None, + add_bos_token=True, + add_eos_token=False, + clean_up_tokenization_spaces=False, + **kwargs, + ): + self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs + bos_token = ( + AddedToken(bos_token, lstrip=False, rstrip=False) + if isinstance(bos_token, str) + else bos_token + ) + eos_token = ( + AddedToken(eos_token, lstrip=False, rstrip=False) + if isinstance(eos_token, str) + else eos_token + ) + unk_token = ( + AddedToken(unk_token, lstrip=False, rstrip=False) + if isinstance(unk_token, str) + else unk_token + ) + pad_token = ( + AddedToken(pad_token, lstrip=False, rstrip=False) + if isinstance(pad_token, str) + else pad_token + ) + self.vocab_file = vocab_file + self.add_bos_token = add_bos_token + self.add_eos_token = add_eos_token + self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) + self.sp_model.Load(vocab_file) + + super().__init__( + bos_token=bos_token, + eos_token=eos_token, + unk_token=unk_token, + pad_token=pad_token, + add_bos_token=add_bos_token, + add_eos_token=add_eos_token, + sp_model_kwargs=self.sp_model_kwargs, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + **kwargs, + ) + + def __getstate__(self): + state = self.__dict__.copy() + state["sp_model"] = None + return state + + def __setstate__(self, d): + self.__dict__ = d + self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) + self.sp_model.Load(self.vocab_file) + + @property + def vocab_size(self): + """Returns vocab size""" + return self.sp_model.get_piece_size() + + def get_vocab(self): + """Returns vocab as a dict""" + vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + def _tokenize(self, text): + """Returns a tokenized string.""" + return self.sp_model.encode(text, out_type=str) + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.sp_model.piece_to_id(token) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + token = self.sp_model.IdToPiece(index) + return token + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + zhPattern = re.compile(u'[\u4e00-\u9fa5]+') + need_convert_punctuation=(",",";","!","?",":","(",")") + current_sub_tokens = [] + out_string = "" + prev_is_special = False + for i, token in enumerate(tokens): + # make sure that special tokens are not decoded using sentencepiece model + if token in self.all_special_tokens: + 
if not prev_is_special and i != 0: + out_string += " " + out_string += self.sp_model.decode(current_sub_tokens) + token + prev_is_special = True + current_sub_tokens = [] + if any([True if punctuation in token else False for punctuation in need_convert_punctuation]): + out_string += self.sp_model.decode(current_sub_tokens) + token=self.sp_model.decode(token) + if zhPattern.search(out_string[-20:]): + token = self.to_zh_punctuation(token) + out_string += token + current_sub_tokens = [] + else: + current_sub_tokens.append(token) + prev_is_special = False + out_string += self.sp_model.decode(current_sub_tokens) + return out_string + + def to_zh_punctuation(self, token): + return token.replace(",",",").replace(";",";").replace("!","!").replace("?","?").replace(":",":").replace("(","(").replace(")",")") + + def save_vocabulary( + self, save_directory, filename_prefix: Optional[str] = None + ) -> Tuple[str]: + """ + Save the vocabulary and special tokens file to a directory. + + Args: + save_directory (`str`): + The directory in which to save the vocabulary. + + Returns: + `Tuple(str)`: Paths to the files saved. + """ + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + out_vocab_file = os.path.join( + save_directory, + (filename_prefix + "-" if filename_prefix else "") + + VOCAB_FILES_NAMES["vocab_file"], + ) + + if os.path.abspath(self.vocab_file) != os.path.abspath( + out_vocab_file + ) and os.path.isfile(self.vocab_file): + copyfile(self.vocab_file, out_vocab_file) + elif not os.path.isfile(self.vocab_file): + with open(out_vocab_file, "wb") as fi: + content_spiece_model = self.sp_model.serialized_model_proto() + fi.write(content_spiece_model) + + return (out_vocab_file,) + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + bos_token_id = [self.bos_token_id] if self.add_bos_token else [] + eos_token_id = [self.eos_token_id] if self.add_eos_token else [] + + output = bos_token_id + token_ids_0 + eos_token_id + + if token_ids_1 is not None: + output = output + bos_token_id + token_ids_1 + eos_token_id + + return output + + def get_special_tokens_mask( + self, + token_ids_0: List[int], + token_ids_1: Optional[List[int]] = None, + already_has_special_tokens: bool = False, + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
+ """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, + token_ids_1=token_ids_1, + already_has_special_tokens=True, + ) + + bos_token_id = [1] if self.add_bos_token else [] + eos_token_id = [1] if self.add_eos_token else [] + + if token_ids_1 is None: + return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id + return ( + bos_token_id + + ([0] * len(token_ids_0)) + + eos_token_id + + bos_token_id + + ([0] * len(token_ids_1)) + + eos_token_id + ) + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT + sequence pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + if token_ids_1 is None, only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of ids. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + bos_token_id = [self.bos_token_id] if self.add_bos_token else [] + eos_token_id = [self.eos_token_id] if self.add_eos_token else [] + + output = [0] * len(bos_token_id + token_ids_0 + eos_token_id) + + if token_ids_1 is not None: + output += [1] * len(bos_token_id + token_ids_1 + eos_token_id) + + return output diff --git a/tokenizer.model b/tokenizer.model new file mode 100644 index 0000000..4eca1a9 --- /dev/null +++ b/tokenizer.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ded43118b7418f56db97a4eed08a5c265c03120158229ddd4fbcc9658241d5f0 +size 1520600 diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000..2647667 --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,45 @@ +{ + "add_bos_token": false, + "add_eos_token": false, + "added_tokens_decoder": { + "0": { + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": true + }, + "1": { + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": true + }, + "2": { + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "auto_map": { + "AutoTokenizer": [ + "tokenization_orion.OrionTokenizer", + null + ] + }, + "bos_token": "", + "chat_template": "{% for message in messages %}{% if loop.first %}{{ bos_token }}{% endif %}{% if message['role'] == 'user' %}{{ 'Human: ' + message['content'] + '\n\nAssistant: ' + eos_token }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token }}{% endif %}{% endfor %}", + "clean_up_tokenization_spaces": false, + "eos_token": "", + "model_max_length": 4096, + "pad_token": "", + "sp_model_kwargs": {}, + "tokenizer_class": "OrionTokenizer", + "unk_token": "" +}