commit 849ab1ca9060e40d909ab8bca5843faa71d44f04
Author: ModelHub XC
Date:   Thu Apr 23 23:31:04 2026 +0800

    Initialize project; model provided by the ModelHub XC community

    Model: jondurbin/airoboros-33b-2.1
    Source: Original Platform

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..a6344aa
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..6b88c96
--- /dev/null
+++ b/README.md
@@ -0,0 +1,338 @@
+---
+license: cc-by-nc-4.0
+datasets:
+- jondurbin/airoboros-2.1
+---
+
+### Overview
+
+This is an instruction fine-tuned llama 30b model, using synthetic data generated by [airoboros](https://github.com/jondurbin/airoboros).
+
+- Experimental RP-style instruction set, with two categories: rp and gtkm
+  - rp includes multi-round chats, with emotes, between a varying number of characters, defined by cards
+  - gtkm is a way to test a simpler alternative to ghost attention - first, a character card is generated, then several questions are created to ask the model (as the character), using the character system prompt, then everything is synthesized into a dialog (one system prompt, all turns remain in character)
+- Experimental support for longer, more detailed writing prompts, as well as next-chapter generation
+- I used the new `cull-instructions` entrypoint in airoboros to shrink the m2.0 dataset to a smaller subset of high-quality instructions (according to gpt-4)
+- The training data now also includes "stylized_response", in which 1500 sample instructions from various categories were re-generated using character cards as system prompts.
+  - this should allow better adherence to style/etc. specified in the system card
+- Thousands of new generations, using some of the updates re: Flesch hints, etc., to get longer/higher quality writing outputs.
+- A small "de-alignment" dataset was also added (not published) to remove some of the censorship in the base models.
+
+I used rope scaling to increase the context length to 4096, but there wasn't a huge amount of data in that range, so YMMV. You may need to rename the directory to `airoboros-33b-2-1` (anything without the `.`, really) so that `trust_remote_code=True` can find the module path properly.
+
+*Why do I try to remove censorship?*
+
+- laws vary widely based on time and location
+- a language model may conflate certain words with laws, e.g. it may think "stealing eggs from a chicken" is illegal
+- these models just produce text; what you do with that text is your responsibility
+- many people and industries deal with "sensitive" content; imagine if a court stenographer's equipment filtered illegal content - it would be useless
+
+Huge thank you to the folks over at [a16z](https://a16z.com/) for sponsoring the costs associated with building models and associated tools!
+
+### Prompt format
+
+The training code was updated to randomize newline vs space:
+https://github.com/jondurbin/qlora/blob/main/qlora.py#L559C1-L559C1
+
+```
+A chat. USER: {prompt} ASSISTANT:
+```
+
+or
+
+```
+A chat.
+USER: {prompt}
+ASSISTANT:
+```
+
+In other words, it's the preamble/system prompt, followed by a single space or newline, then "USER: " (single space after the colon), then the prompt (which can have multiple lines, spaces, whatever), then a single space or newline, followed by "ASSISTANT: " (with a single space after the colon).
+
+__*I strongly suggest adding stopping criteria/early inference stopping on "USER:", because the training data includes many multi-round chats and the model could otherwise start simulating a conversation!*__
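+
+To make the prompt format and the early-stopping suggestion concrete, here is a rough, untested sketch using the `transformers` generate API. The local directory name, prompt text, and generation settings are placeholders for illustration; adjust them to your setup.
+
+```python
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList
+
+MODEL_DIR = "airoboros-33b-2-1"  # hypothetical local clone, renamed as suggested above
+
+tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
+model = AutoModelForCausalLM.from_pretrained(
+    MODEL_DIR,
+    trust_remote_code=True,   # picks up modelling_llama.py (scaled RoPE) from the repo
+    torch_dtype=torch.bfloat16,
+    device_map="auto",
+)
+
+class StopOnUser(StoppingCriteria):
+    """Stop generating as soon as the continuation contains 'USER:'."""
+    def __init__(self, tokenizer, prompt_len):
+        self.tokenizer = tokenizer
+        self.prompt_len = prompt_len
+
+    def __call__(self, input_ids, scores, **kwargs):
+        generated = self.tokenizer.decode(input_ids[0][self.prompt_len:], skip_special_tokens=True)
+        return "USER:" in generated
+
+prompt = "A chat.\nUSER: Give me three uses for a brick.\nASSISTANT:"
+inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+stop = StoppingCriteriaList([StopOnUser(tokenizer, inputs["input_ids"].shape[1])])
+output = model.generate(**inputs, max_new_tokens=512, stopping_criteria=stop)
+print(tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
+```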
+
+### Helpful usage tips
+
+*The prompts shown here are just the text that would be included after USER: and before ASSISTANT: in the full prompt format above; the system prompt and USER:/ASSISTANT: have been omitted for readability.*
+
+#### Context obedient question answering
+
+By obedient, I mean the model was trained to ignore what it thinks it knows and use the context to answer the question. The model was also tuned to limit its answers to the provided context as much as possible, to reduce hallucinations.
+
+The format for a closed-context prompt is as follows:
+```
+BEGININPUT
+BEGINCONTEXT
+[key0: value0]
+[key1: value1]
+... other metadata ...
+ENDCONTEXT
+[insert your text blocks here]
+ENDINPUT
+[add as many other blocks, in the exact same format]
+BEGININSTRUCTION
+[insert your instruction(s). The model was tuned with single questions, paragraph format, lists, etc.]
+ENDINSTRUCTION
+```
+
+It's also helpful to add "Don't make up answers if you don't know." to your instruction block, so that the model doesn't make something up if the context is completely unrelated.
+
+*The __only__ prompts that need this closed-context formatting are closed-context instructions. Normal questions/instructions do not!*
+
+I know it's a bit verbose and annoying, but after much trial and error, using these explicit delimiters helps the model understand where to find the answers and how to associate specific sources with them.
+- `BEGININPUT` - denotes a new input block
+- `BEGINCONTEXT` - denotes the block of context (metadata key/value pairs) to associate with the current input block
+- `ENDCONTEXT` - denotes the end of the metadata block for the current input
+- [text] - Insert whatever text you want for the input block, as many paragraphs as can fit in the context.
+- `ENDINPUT` - denotes the end of the current input block
+- [repeat as many input blocks in this format as you want]
+- `BEGININSTRUCTION` - denotes the start of the instruction(s) (one or more) to respond to for all of the input blocks above.
+- [instruction(s)]
+- `ENDINSTRUCTION` - denotes the end of the instruction set
+
+It sometimes works without `ENDINSTRUCTION`, but by explicitly including it in the prompt, the model better understands that all of the instructions in the block should be responded to.
+
+Here's a trivial but important example to prove the point:
+```
+BEGININPUT
+BEGINCONTEXT
+date: 2021-01-01
+url: https://web.site/123
+ENDCONTEXT
+In a shocking turn of events, blueberries are now green, but will be sticking with the same name.
+ENDINPUT
+BEGININSTRUCTION
+What color are blueberries? Source?
+ENDINSTRUCTION
+```
+
+And the response:
+```
+Blueberries are now green.
+Source:
+date: 2021-01-01
+url: https://web.site/123
+```
+
+#### Coding
+
+You can ask for fairly complex coding instructions with multiple criteria, e.g.:
+
+```
+Create a python application with the following requirements:
+- Asyncio FastAPI webserver
+- ping endpoint that returns the current date in JSON format
+- file upload endpoint, which calculates the file's sha256 checksum, and checks postgres to deduplicate
+```
+
+Or inline criteria:
+
+```
+Write a multi-threaded TCP server in C that accepts a "GET [key]" input and "SET [key] [value]" input, and uses a binary tree to get and store the input values.
+```
+
+You can also optionally add a single space and "PLAINFORMAT" at the end of your prompt to avoid backticks, explanations, etc. and just print the code, e.g.:
+
+```
+Write a websocket application in node.js. PLAINFORMAT
+```
+
+#### Agent/function calling
+
+The dataset includes many examples of function/args generation based on input criteria. This is somewhat similar to OpenAI function calling, but the output is either JSON or YAML.
+
+Example prompt:
+```
+As an AI assistant, please select the most suitable function and parameters from the list of available functions below, based on the user's input. Provide your response in JSON format.
+
+Input: I want to know how many times 'Python' is mentioned in my text file.
+
+Available functions:
+file_analytics:
+  description: This tool performs various operations on a text file.
+  params:
+    action: The operation we want to perform on the data, such as "count_occurrences", "find_line", etc.
+    filters:
+      keyword: The word or phrase we want to search for.
+```
+
+Response:
+```json
+{
+  "function": "file_analytics",
+  "params": {
+    "action": "count_occurrences",
+    "filters": {
+      "keyword": "Python"
+    }
+  }
+}
+```
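+
+Since the response is plain JSON, wiring it up to real code is mostly a dictionary lookup. Below is a minimal, untested sketch; the `file_analytics` handler and the file path are hypothetical, mirroring the example above, and a YAML response would need `yaml.safe_load` instead of `json.loads`.
+
+```python
+import json
+
+def file_analytics(action, filters, path="my_text_file.txt"):
+    # Hypothetical implementation of the function described in the prompt above.
+    if action == "count_occurrences":
+        with open(path) as f:
+            return f.read().count(filters["keyword"])
+    raise ValueError(f"unsupported action: {action}")
+
+HANDLERS = {"file_analytics": file_analytics}
+
+def dispatch(model_output: str):
+    call = json.loads(model_output)       # e.g. the JSON response shown above
+    handler = HANDLERS[call["function"]]
+    return handler(**call["params"])
+```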
+
+#### Chain-of-thought
+
+You can ask for several possible responses to a given problem, with a ranking and final answer selection.
+
+Example prompt:
+
+```
+A group of people decide to play a round-robin tournament where every player plays every other player exactly once. If a total of 45 games were played, how many players participated in the tournament? Offer several potential answers, rank them according to mathematical logic, and pick the most feasible one.
+```
+
+Example response:
+```
+Solution 1:
+In a round-robin tournament, each player plays every other player exactly once. This means that the total number of games played is equal to the combination of all players taken two at a time (since a game involves two players).
+
+The formula for combinations is nC2 = n(n-1)/2, where n is the total number of players. We know that nC2 equals 45 (the total number of games), so we can set up the equation as follows:
+
+n(n-1)/2 = 45
+
+Solving this equation gives us n=10.
+
+Final answer: There were 10 players in the tournament.
+
+Solution 2:
+Another way to approach this problem is by using the concept of triangular numbers. Triangular numbers are a sequence of numbers where each number can be represented as a triangle with dots. The nth triangular number is given by the sum of the first n natural numbers.
+
+If we consider each game as a dot and each player as a side of the triangle, then the total number of games (45) would correspond to the 9th triangular number because 1+2+3+4+5+6+7+8+9=45. However, since each side of the triangle represents a player, and there's one more player than sides in our model (because the last player has no one left to play against), we need to add one more to get the total number of players.
+
+Final answer: There were 10 players in the tournament.
+
+Solution 3:
+We could also use trial and error to solve this problem. Starting from a small number of players, we calculate how many games would be played in a round-robin tournament until we reach 45 games. For example, with 2 players, only 1 game is played; with 3 players, 3 games are played; with 4 players, 6 games are played, and so on. Continuing this process, we find that with 10 players, 45 games are played.
+
+Final answer: There were 10 players in the tournament.
+
+Ranking of solutions from best to worst:
+1. Solution 1: This solution uses direct mathematical reasoning based on the properties of a round-robin tournament and solves an equation to find the answer.
+2. Solution 2: This solution uses the concept of triangular numbers which requires additional steps and knowledge beyond the immediate problem context, but still arrives at the correct answer.
+3. Solution 3: This solution relies on trial and error, which is less efficient and reliable than the other methods, but it eventually gets to the right answer.
+
+Best and final answer: There were 10 players in the tournament.
+```
+
+#### reWOO style execution planning
+
+The model now supports execution planning for complex instructions that would require making use of several tools. The output is just the plan; you must implement a mechanism to parse the output and actually call the functions!
+
+Example prompt:
+```
+Please construct a systematic plan to generate an optimal response to the user instruction, utilizing a set of provided tools. Each plan will correspond to an evidence value, which will be the output of one of the available functions given an input string
+that could be the user's question, one or more prior evidence values, or a combination of both.
+
+Here are the tools available to be called:
+DuckDuckGo[input]: Tool that enables the user to search for information using the DuckDuckGo search engine. This tool is helpful in locating a suitable list of sites that may contain the answer to the user's query. It does not directly answer the question but finds a list of sites that might have the answer.
+TextScraper[input]: Load one or more websites from the input string containing newline delimited links, where input is one or more links, and produces plain text output containing the content of the links.
+HyperlinkExtractor[input]: Extract links from plain text and produces a plain text, newline delimited response of links.
+KnowledgeModel[input]: Question answering language model, particularly useful in answering questions based on an input passage of text. The input must be a text question that references an :evidence[n]: variable, e.g. What color is the cat, given :evidence1:?
+
+The output should be in format:
+Plan: [first action to take based on input question]
+:evidence0: = [function to call with input parameter]
+Plan: [next action to take, based on result of :evidence0:]
+:evidence1: = [next function to call and input parameter, which may include reference to previous evidence, e.g. "Given context :evidence0"]
+...
+Answer: [:evidence[n]: containing the final answer.]
+
+Question: Who won the Nobel Prize in Literature in 2020 and what is their most famous work?
+```
+
+Response:
+```
+Plan: Begin by conducting a web search to find out who won the Nobel Prize in Literature in 2020.
+:evidence0: = DuckDuckGo[Nobel Prize in Literature 2020 winner]
+Plan: Utilize the KnowledgeModel to interpret the search results and find the name of the laureate.
+:evidence1: = KnowledgeModel[Who won the Nobel Prize in Literature in 2020, given :evidence0:?]
+Plan: Conduct another web search to find the most famous work of the identified laureate.
+:evidence2: = DuckDuckGo[Most famous work of :evidence1:]
+Plan: Extract the relevant links from the DuckDuckGo search results for a more focused search.
+:evidence3: = HyperlinkExtractor[:evidence2:]
+Plan: Use the TextScraper tool to extract information from the relevant links.
+:evidence4: = TextScraper[:evidence3:]
+Plan: Finally, utilize the KnowledgeModel to identify and summarize the most famous work of the laureate from the extracted information.
+:evidence5: = KnowledgeModel[What is the most famous work of :evidence1:, given :evidence4:?]
+Answer: :evidence5:
+```
+
+For this to be useful, you'd have to parse the output plan text and implement/call each of the functions. This is just pseudo-code, completely untested off the top of my head, and would obviously require full implementation + hardening:
+
+```python
+import re
+import requests
+
+def inject_context(input_text, **context):
+    # Replace any :evidenceN: placeholders with previously computed values.
+    for ref in set(re.findall(r"(:evidence[0-9]+:)", input_text, re.I)):
+        input_text = input_text.replace(ref, context.get(ref, ""))
+    return input_text
+
+def duckduckgo(input_text, **context):
+    search_string = inject_context(input_text, **context)
+    # ... search via duck duck go using search_string
+    # ... return text content
+    raise NotImplementedError
+
+def link_extractor(input_text, **context):
+    input_text = inject_context(input_text, **context)
+    return "\n".join(list(set(re.findall(r"(https?://[^\s]+)", input_text, re.I))))
+
+def scrape(input_text, **context):
+    input_text = inject_context(input_text, **context)
+    text = []
+    for link in input_text.splitlines():
+        text.append(requests.get(link, timeout=30).text)
+    return "\n".join(text)
+
+def infer(input_text, **context):
+    prompt = inject_context(input_text, **context)
+    # ... call model with prompt, return output
+    raise NotImplementedError
+
+def parse_plan(plan):
+    method_map = {
+        "DuckDuckGo": duckduckgo,
+        "HyperlinkExtractor": link_extractor,
+        "KnowledgeModel": infer,
+        "TextScraper": scrape,
+    }
+    context = {}
+    for line in plan.strip().splitlines():
+        if line.startswith("Plan:"):
+            print(line)
+            continue
+        parts = re.match(r"^(:evidence[0-9]+:)\s*=\s*([^\[]+)(\[.*\])\s*$", line, re.I)
+        if not parts:
+            if line.startswith("Answer: "):
+                return context.get(line.split(" ")[-1].strip(), "Answer couldn't be generated...")
+            raise RuntimeError("bad format: " + line)
+        # Strip the surrounding brackets from the tool argument before dispatching.
+        context[parts.group(1)] = method_map[parts.group(2)](parts.group(3).strip("[]"), **context)
+```
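+
+For example, assuming the model's plan (the raw text of a response like the one above) has been saved to a file, you could feed it to `parse_plan` like this; it will raise `NotImplementedError` until the placeholder tool bodies are filled in:
+
+```python
+if __name__ == "__main__":
+    with open("plan.txt") as f:  # hypothetical file holding the model's plan output
+        plan_text = f.read()
+    print(parse_plan(plan_text))
+```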
+
+### Contribute
+
+If you're interested in new functionality, particularly a new "instructor" type to generate a specific type of training data,
+take a look at the dataset generation tool repo: https://github.com/jondurbin/airoboros and either make a PR or open an issue with details.
+
+To help me with the OpenAI/compute costs:
+
+- https://bmc.link/jondurbin
+- ETH 0xce914eAFC2fe52FdceE59565Dd92c06f776fcb11
+- BTC bc1qdwuth4vlg8x37ggntlxu5cjfwgmdy5zaa7pswf
+
+### Licence and usage restrictions
+
+This model is built on top of the original llama-30b, which has a strict noncommercial license.
+
+The fine-tuning data was generated by OpenAI API calls to gpt-4, via [airoboros](https://github.com/jondurbin/airoboros).
+
+The ToS for OpenAI API usage has a clause preventing the output from being used to train a model that __competes__ with OpenAI:
+
+- what does *compete* actually mean here?
+- these small open source models will not produce output anywhere near the quality of gpt-4, or even gpt-3.5, so I can't imagine this could credibly be considered competing in the first place
+- if someone else uses the dataset to do the same, they wouldn't necessarily be violating the ToS because they didn't call the API, so I don't know how that works
+- the training data used in essentially all large language models includes a significant amount of copyrighted or otherwise non-permissive licensing in the first place
+- other work using the self-instruct method, e.g. the original here: https://github.com/yizhongw/self-instruct, released the data and model as apache-2
+
+I am purposely leaving this license ambiguous (other than the fact that you must comply with Meta's original llama license) because I am not a lawyer and refuse to attempt to interpret all of the terms accordingly.
+
+Your best bet is probably to avoid using this commercially due to the OpenAI API usage.
+
+Either way, by using this model, you agree to completely indemnify me.
\ No newline at end of file diff --git a/config.json b/config.json new file mode 100644 index 0000000..ac48ffe --- /dev/null +++ b/config.json @@ -0,0 +1,34 @@ +{ + "_name_or_path": "llama-30b-hf/-dequantized", + "architectures": [ + "LlamaForCausalLM" + ], + "bos_token_id": 0, + "eos_token_id": 1, + "hidden_act": "silu", + "hidden_size": 6656, + "initializer_range": 0.02, + "intermediate_size": 17920, + "max_position_embeddings": 4096, + "max_sequence_length": 4096, + "model_type": "llama", + "num_attention_heads": 52, + "num_hidden_layers": 60, + "num_key_value_heads": 52, + "pad_token_id": -1, + "rms_norm_eps": 1e-06, + "rope_scaling": { + "factor": 2.0, + "type": "linear" + }, + "tie_word_embeddings": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.31.0", + "use_cache": true, + "vocab_size": 32000, + "auto_map": { + "AutoModel": "modelling_llama.LlamaModel", + "AutoModelForCausalLM": "modelling_llama.LlamaForCausalLM", + "AutoModelForSequenceClassification": "modelling_llama.LlamaForSequenceClassification" + } +} diff --git a/generation_config.json b/generation_config.json new file mode 100644 index 0000000..4fd1bf1 --- /dev/null +++ b/generation_config.json @@ -0,0 +1,7 @@ +{ + "_from_model_config": true, + "bos_token_id": 0, + "eos_token_id": 1, + "pad_token_id": 0, + "transformers_version": "4.31.0" +} diff --git a/llama_rope_scaled_monkey_patch.py b/llama_rope_scaled_monkey_patch.py new file mode 100644 index 0000000..2d39529 --- /dev/null +++ b/llama_rope_scaled_monkey_patch.py @@ -0,0 +1,65 @@ +import torch +import transformers +import transformers.models.llama.modeling_llama +from einops import rearrange +import random + +# This monkey patch file is not needed if using ExLlama, or if using `trust_remote_code=True`` + +class ScaledRotaryEmbedding(torch.nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim)) + self.register_buffer("inv_freq", inv_freq) + + max_position_embeddings = 4096 + + # Build here to make `torch.jit.trace` work. + self.max_seq_len_cached = max_position_embeddings + t = torch.arange( + self.max_seq_len_cached, + device=self.inv_freq.device, + dtype=self.inv_freq.dtype, + ) + + self.scale = 1 / 2 + t *= self.scale + + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer( + "cos_cached", emb.cos()[None, None, :, :], persistent=False + ) + self.register_buffer( + "sin_cached", emb.sin()[None, None, :, :], persistent=False + ) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case. 
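+        # If the cache is exceeded at runtime, the cos/sin buffers below are rebuilt, re-applying the same 0.5 position scale used in `__init__`.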
+ if seq_len > self.max_seq_len_cached: + self.max_seq_len_cached = seq_len + t = torch.arange( + self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype + ) + t *= self.scale + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1).to(x.device) + self.register_buffer( + "cos_cached", emb.cos()[None, None, :, :], persistent=False + ) + self.register_buffer( + "sin_cached", emb.sin()[None, None, :, :], persistent=False + ) + return ( + self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + ) + + +def replace_llama_rope_with_scaled_rope(): + transformers.models.llama.modeling_llama.LlamaRotaryEmbedding = ( + ScaledRotaryEmbedding + ) diff --git a/modelling_llama.py b/modelling_llama.py new file mode 100644 index 0000000..8c10f87 --- /dev/null +++ b/modelling_llama.py @@ -0,0 +1,894 @@ +# coding=utf-8 +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch LLaMA model.""" +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from transformers.models.llama.modeling_llama import LlamaConfig + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "LlamaConfig" + + +# Copied from transformers.models.bart.modeling_bart._make_causal_mask +def _make_causal_mask( + input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 +): + """ + Make causal mask used for bi-directional self-attention. 
+ """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device) + mask_cond = torch.arange(mask.size(-1), device=device) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + +# Copied from transformers.models.bart.modeling_bart._expand_mask +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +class LlamaRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + LlamaRMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + + return (self.weight * hidden_states).to(input_dtype) + + +class LlamaRotaryEmbedding(torch.nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, scale=1, device=None): + super().__init__() + inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim)) + self.register_buffer("inv_freq", inv_freq) + + # Build here to make `torch.jit.trace` work. + self.max_seq_len_cached = max_position_embeddings + t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype) + + self.scale = scale + t *= self.scale + + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + dtype = torch.get_default_dtype() + self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case. 
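+        # Note: unlike `__init__` (and the monkey patch), this rebuild path does not re-apply `self.scale` to `t`.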
+ if seq_len > self.max_seq_len_cached: + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype) + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1).to(x.device) + self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(x.dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(x.dtype), persistent=False) + return ( + self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + ) + + +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb(q, k, cos, sin, position_ids): + # The first two dimensions of cos and sin are always 1, so we can `squeeze` them. + cos = cos.squeeze(1).squeeze(0) # [seq_len, dim] + sin = sin.squeeze(1).squeeze(0) # [seq_len, dim] + cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] + sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +class LlamaMLP(nn.Module): + def __init__( + self, + hidden_size: int, + intermediate_size: int, + hidden_act: str, + ): + super().__init__() + self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False) + self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False) + self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False) + self.act_fn = ACT2FN[hidden_act] + + def forward(self, x): + return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + + +class LlamaAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: LlamaConfig): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.max_position_embeddings = config.max_position_embeddings + self.position_embeddings_scale = 2048 / self.max_position_embeddings + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." 
+ ) + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) + self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) + self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) + self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, scale=self.position_embeddings_scale) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + # [bsz, nh, t, hd] + + if past_key_value is not None: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + past_key_value = (key_states, value_states) if use_cache else None + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights + attention_mask + attn_weights = torch.max( + attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device) + ) + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class LlamaDecoderLayer(nn.Module): + def __init__(self, config: LlamaConfig): + super().__init__() + self.hidden_size = config.hidden_size + self.self_attn = LlamaAttention(config=config) + self.mlp = LlamaMLP( + 
hidden_size=self.hidden_size, + intermediate_size=config.intermediate_size, + hidden_act=config.hidden_act, + ) + self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +LLAMA_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`LlamaConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
+""" + + +@add_start_docstrings( + "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", + LLAMA_START_DOCSTRING, +) +class LlamaPreTrainedModel(PreTrainedModel): + config_class = LlamaConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["LlamaDecoderLayer"] + _skip_keys_device_placement = "past_key_values" + _keys_to_ignore_on_load_unexpected = [r"decoder\.version"] + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, LlamaModel): + module.gradient_checkpointing = value + + +LLAMA_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", + LLAMA_START_DOCSTRING, +) +class LlamaModel(LlamaPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`] + + Args: + config: LlamaConfig + """ + + def __init__(self, config: LlamaConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask + def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, + inputs_embeds.dtype, + device=inputs_embeds.device, + past_key_values_length=past_key_values_length, + ) + + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else 
self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + batch_size, seq_length = input_ids.shape + elif inputs_embeds is not None: + batch_size, seq_length, _ = inputs_embeds.shape + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + seq_length_with_past = seq_length + past_key_values_length = 0 + + if past_key_values is not None: + past_key_values_length = past_key_values[0][0].shape[2] + seq_length_with_past = seq_length_with_past + past_key_values_length + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0).view(-1, seq_length) + else: + position_ids = position_ids.view(-1, seq_length).long() + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + # embed positions + if attention_mask is None: + attention_mask = torch.ones( + (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device + ) + attention_mask = self._prepare_decoder_attention_mask( + attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length + ) + + hidden_states = inputs_embeds + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = () if use_cache else None + + for idx, decoder_layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + # None for past_key_value + return module(*inputs, output_attentions, None) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(decoder_layer), + hidden_states, + attention_mask, + position_ids, + None, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +class LlamaForCausalLM(LlamaPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.model = LlamaModel(config) + + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
+ + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, LlamaForCausalLM + + >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) + >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs + ): + if past_key_values: + input_ids = input_ids[:, -1:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -1].unsqueeze(-1) + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + } + ) + return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + +@add_start_docstrings( + """ + The LLaMa Model transformer 
with a sequence classification head on top (linear layer). + + [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-2) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). + """, + LLAMA_START_DOCSTRING, +) +class LlamaForSequenceClassification(LlamaPreTrainedModel): + _keys_to_ignore_on_load_missing = [r"lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = LlamaModel(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device) + else: + sequence_lengths = -1 + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) diff --git a/pytorch_model-00001-of-00007.bin b/pytorch_model-00001-of-00007.bin new file mode 100644 index 0000000..e011445 --- /dev/null +++ b/pytorch_model-00001-of-00007.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6ef57219d25fe2e1e0107af896dce11a8f45f484c95e0e8f925ff9605470188 +size 9818326330 diff --git a/pytorch_model-00002-of-00007.bin b/pytorch_model-00002-of-00007.bin new file mode 100644 index 0000000..07778e0 --- /dev/null +++ b/pytorch_model-00002-of-00007.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bc59db4a265ae459c77025227f77d309e2b6519d99891ccc19cfb6e5817c204 +size 9958104574 diff --git a/pytorch_model-00003-of-00007.bin b/pytorch_model-00003-of-00007.bin new file mode 100644 index 0000000..bfa27a4 --- /dev/null +++ b/pytorch_model-00003-of-00007.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5c6beff255ed5bf62f94d7455377fbbd2ab798f868b4cb48d14fa075eada4bb +size 9896736546 diff --git a/pytorch_model-00004-of-00007.bin b/pytorch_model-00004-of-00007.bin new file mode 100644 index 0000000..7a26c39 --- /dev/null +++ b/pytorch_model-00004-of-00007.bin @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:d94485fddef7aa344c79b10e97e400624b088c96c18cd56eccdfca61e7cf35a5 +size 9869472248 diff --git a/pytorch_model-00005-of-00007.bin b/pytorch_model-00005-of-00007.bin new file mode 100644 index 0000000..62f066c --- /dev/null +++ b/pytorch_model-00005-of-00007.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67e60e06b47af6b0cc2b71da7850e57dccc528c36db0f221073b07e82b5387e1 +size 9869472212 diff --git a/pytorch_model-00006-of-00007.bin b/pytorch_model-00006-of-00007.bin new file mode 100644 index 0000000..1ae54cf --- /dev/null +++ b/pytorch_model-00006-of-00007.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d2a553317349dfd2a378d859630a4ce5c78efb5ee1ef1db62b8a50a50bdad58 +size 9958104574 diff --git a/pytorch_model-00007-of-00007.bin b/pytorch_model-00007-of-00007.bin new file mode 100644 index 0000000..2bada9d --- /dev/null +++ b/pytorch_model-00007-of-00007.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b508f9e962640fc105526d0dbbe9f94e72aefda3b088613e1879abcfad0ed9bc +size 5687904472 diff --git a/pytorch_model.bin.index.json b/pytorch_model.bin.index.json new file mode 100644 index 0000000..4601490 --- /dev/null +++ b/pytorch_model.bin.index.json @@ -0,0 +1,610 @@ +{ + "metadata": { + "total_size": 65057902592 + }, + "weight_map": { + "lm_head.weight": "pytorch_model-00007-of-00007.bin", + "model.embed_tokens.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.0.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00007.bin", + "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.1.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00007.bin", + "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.10.input_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.10.mlp.down_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.10.mlp.up_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin", + 
"model.layers.10.self_attn.k_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.10.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00007.bin", + "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.11.input_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.11.mlp.down_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.11.mlp.up_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.11.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00007.bin", + "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.12.input_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.12.mlp.down_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.12.mlp.up_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.12.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00007.bin", + "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.13.input_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.13.mlp.down_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.13.mlp.up_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.13.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00007.bin", + "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.14.input_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.14.mlp.down_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.14.mlp.up_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.14.self_attn.rotary_emb.inv_freq": 
"pytorch_model-00002-of-00007.bin", + "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.15.input_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.15.mlp.down_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.15.mlp.up_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.15.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00007.bin", + "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.16.input_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.16.mlp.down_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.16.mlp.up_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.16.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00007.bin", + "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.17.input_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.17.mlp.down_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.17.mlp.up_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.17.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00007.bin", + "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.18.input_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.18.mlp.down_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.18.mlp.up_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.18.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00007.bin", + "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.19.input_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.19.mlp.down_proj.weight": "pytorch_model-00003-of-00007.bin", + 
"model.layers.19.mlp.gate_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.19.mlp.up_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.19.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00007.bin", + "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.2.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00007.bin", + "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.20.input_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.20.mlp.down_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.20.mlp.up_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.20.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00007.bin", + "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.21.input_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.21.mlp.down_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.21.mlp.up_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.21.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00007.bin", + "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.22.input_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.22.mlp.down_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.22.mlp.up_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.22.self_attn.k_proj.weight": 
"pytorch_model-00003-of-00007.bin", + "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.22.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00007.bin", + "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.23.input_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.23.mlp.down_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.23.mlp.up_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.23.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00007.bin", + "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.24.input_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.24.mlp.down_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.24.mlp.up_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.24.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00007.bin", + "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.25.input_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.25.mlp.down_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.25.mlp.up_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.25.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00007.bin", + "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.26.input_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.26.mlp.down_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.26.mlp.up_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.26.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00007.bin", + 
"model.layers.26.self_attn.v_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.27.input_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.27.mlp.down_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.27.mlp.up_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.27.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00007.bin", + "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00003-of-00007.bin", + "model.layers.28.input_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.28.mlp.down_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.28.mlp.up_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.28.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00007.bin", + "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.29.input_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.29.mlp.down_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.29.mlp.up_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.29.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00007.bin", + "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.3.mlp.down_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.3.mlp.up_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.3.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00007.bin", + "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.30.input_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.30.mlp.down_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00004-of-00007.bin", + 
"model.layers.30.mlp.up_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.30.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00007.bin", + "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.31.input_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.31.mlp.down_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.31.mlp.up_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.31.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00007.bin", + "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.32.input_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.32.mlp.down_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.32.mlp.gate_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.32.mlp.up_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.32.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.32.self_attn.k_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.32.self_attn.o_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.32.self_attn.q_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.32.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00007.bin", + "model.layers.32.self_attn.v_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.33.input_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.33.mlp.down_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.33.mlp.gate_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.33.mlp.up_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.33.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.33.self_attn.k_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.33.self_attn.o_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.33.self_attn.q_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.33.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00007.bin", + "model.layers.33.self_attn.v_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.34.input_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.34.mlp.down_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.34.mlp.gate_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.34.mlp.up_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.34.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.34.self_attn.k_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.34.self_attn.o_proj.weight": 
"pytorch_model-00004-of-00007.bin", + "model.layers.34.self_attn.q_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.34.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00007.bin", + "model.layers.34.self_attn.v_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.35.input_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.35.mlp.down_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.35.mlp.gate_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.35.mlp.up_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.35.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.35.self_attn.k_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.35.self_attn.o_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.35.self_attn.q_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.35.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00007.bin", + "model.layers.35.self_attn.v_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.36.input_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.36.mlp.down_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.36.mlp.gate_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.36.mlp.up_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.36.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.36.self_attn.k_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.36.self_attn.o_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.36.self_attn.q_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.36.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00007.bin", + "model.layers.36.self_attn.v_proj.weight": "pytorch_model-00004-of-00007.bin", + "model.layers.37.input_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.37.mlp.down_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.37.mlp.gate_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.37.mlp.up_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.37.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.37.self_attn.k_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.37.self_attn.o_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.37.self_attn.q_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.37.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00007.bin", + "model.layers.37.self_attn.v_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.38.input_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.38.mlp.down_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.38.mlp.gate_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.38.mlp.up_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.38.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.38.self_attn.k_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.38.self_attn.o_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.38.self_attn.q_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.38.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00007.bin", + "model.layers.38.self_attn.v_proj.weight": "pytorch_model-00005-of-00007.bin", + 
"model.layers.39.input_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.39.mlp.down_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.39.mlp.gate_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.39.mlp.up_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.39.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.39.self_attn.k_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.39.self_attn.o_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.39.self_attn.q_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.39.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00007.bin", + "model.layers.39.self_attn.v_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.4.mlp.down_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.4.mlp.up_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.4.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00007.bin", + "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.40.input_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.40.mlp.down_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.40.mlp.gate_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.40.mlp.up_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.40.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.40.self_attn.k_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.40.self_attn.o_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.40.self_attn.q_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.40.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00007.bin", + "model.layers.40.self_attn.v_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.41.input_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.41.mlp.down_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.41.mlp.gate_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.41.mlp.up_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.41.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.41.self_attn.k_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.41.self_attn.o_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.41.self_attn.q_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.41.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00007.bin", + "model.layers.41.self_attn.v_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.42.input_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.42.mlp.down_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.42.mlp.gate_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.42.mlp.up_proj.weight": "pytorch_model-00005-of-00007.bin", + 
"model.layers.42.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.42.self_attn.k_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.42.self_attn.o_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.42.self_attn.q_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.42.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00007.bin", + "model.layers.42.self_attn.v_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.43.input_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.43.mlp.down_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.43.mlp.gate_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.43.mlp.up_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.43.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.43.self_attn.k_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.43.self_attn.o_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.43.self_attn.q_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.43.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00007.bin", + "model.layers.43.self_attn.v_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.44.input_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.44.mlp.down_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.44.mlp.gate_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.44.mlp.up_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.44.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.44.self_attn.k_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.44.self_attn.o_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.44.self_attn.q_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.44.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00007.bin", + "model.layers.44.self_attn.v_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.45.input_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.45.mlp.down_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.45.mlp.gate_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.45.mlp.up_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.45.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.45.self_attn.k_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.45.self_attn.o_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.45.self_attn.q_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.45.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00007.bin", + "model.layers.45.self_attn.v_proj.weight": "pytorch_model-00005-of-00007.bin", + "model.layers.46.input_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.46.mlp.down_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.46.mlp.gate_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.46.mlp.up_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.46.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.46.self_attn.k_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.46.self_attn.o_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.46.self_attn.q_proj.weight": 
"pytorch_model-00006-of-00007.bin", + "model.layers.46.self_attn.rotary_emb.inv_freq": "pytorch_model-00006-of-00007.bin", + "model.layers.46.self_attn.v_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.47.input_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.47.mlp.down_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.47.mlp.gate_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.47.mlp.up_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.47.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.47.self_attn.k_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.47.self_attn.o_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.47.self_attn.q_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.47.self_attn.rotary_emb.inv_freq": "pytorch_model-00006-of-00007.bin", + "model.layers.47.self_attn.v_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.48.input_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.48.mlp.down_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.48.mlp.gate_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.48.mlp.up_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.48.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.48.self_attn.k_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.48.self_attn.o_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.48.self_attn.q_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.48.self_attn.rotary_emb.inv_freq": "pytorch_model-00006-of-00007.bin", + "model.layers.48.self_attn.v_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.49.input_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.49.mlp.down_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.49.mlp.gate_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.49.mlp.up_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.49.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.49.self_attn.k_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.49.self_attn.o_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.49.self_attn.q_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.49.self_attn.rotary_emb.inv_freq": "pytorch_model-00006-of-00007.bin", + "model.layers.49.self_attn.v_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.5.mlp.down_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.5.mlp.up_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.5.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00007.bin", + "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.50.input_layernorm.weight": "pytorch_model-00006-of-00007.bin", + 
"model.layers.50.mlp.down_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.50.mlp.gate_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.50.mlp.up_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.50.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.50.self_attn.k_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.50.self_attn.o_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.50.self_attn.q_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.50.self_attn.rotary_emb.inv_freq": "pytorch_model-00006-of-00007.bin", + "model.layers.50.self_attn.v_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.51.input_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.51.mlp.down_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.51.mlp.gate_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.51.mlp.up_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.51.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.51.self_attn.k_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.51.self_attn.o_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.51.self_attn.q_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.51.self_attn.rotary_emb.inv_freq": "pytorch_model-00006-of-00007.bin", + "model.layers.51.self_attn.v_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.52.input_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.52.mlp.down_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.52.mlp.gate_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.52.mlp.up_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.52.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.52.self_attn.k_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.52.self_attn.o_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.52.self_attn.q_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.52.self_attn.rotary_emb.inv_freq": "pytorch_model-00006-of-00007.bin", + "model.layers.52.self_attn.v_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.53.input_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.53.mlp.down_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.53.mlp.gate_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.53.mlp.up_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.53.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.53.self_attn.k_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.53.self_attn.o_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.53.self_attn.q_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.53.self_attn.rotary_emb.inv_freq": "pytorch_model-00006-of-00007.bin", + "model.layers.53.self_attn.v_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.54.input_layernorm.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.54.mlp.down_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.54.mlp.gate_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.54.mlp.up_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.54.post_attention_layernorm.weight": 
"pytorch_model-00006-of-00007.bin", + "model.layers.54.self_attn.k_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.54.self_attn.o_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.54.self_attn.q_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.54.self_attn.rotary_emb.inv_freq": "pytorch_model-00006-of-00007.bin", + "model.layers.54.self_attn.v_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.55.input_layernorm.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.55.mlp.down_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.55.mlp.gate_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.55.mlp.up_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.55.post_attention_layernorm.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.55.self_attn.k_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.55.self_attn.o_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.55.self_attn.q_proj.weight": "pytorch_model-00006-of-00007.bin", + "model.layers.55.self_attn.rotary_emb.inv_freq": "pytorch_model-00007-of-00007.bin", + "model.layers.55.self_attn.v_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.56.input_layernorm.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.56.mlp.down_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.56.mlp.gate_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.56.mlp.up_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.56.post_attention_layernorm.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.56.self_attn.k_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.56.self_attn.o_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.56.self_attn.q_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.56.self_attn.rotary_emb.inv_freq": "pytorch_model-00007-of-00007.bin", + "model.layers.56.self_attn.v_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.57.input_layernorm.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.57.mlp.down_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.57.mlp.gate_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.57.mlp.up_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.57.post_attention_layernorm.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.57.self_attn.k_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.57.self_attn.o_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.57.self_attn.q_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.57.self_attn.rotary_emb.inv_freq": "pytorch_model-00007-of-00007.bin", + "model.layers.57.self_attn.v_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.58.input_layernorm.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.58.mlp.down_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.58.mlp.gate_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.58.mlp.up_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.58.post_attention_layernorm.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.58.self_attn.k_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.58.self_attn.o_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.58.self_attn.q_proj.weight": "pytorch_model-00007-of-00007.bin", + 
"model.layers.58.self_attn.rotary_emb.inv_freq": "pytorch_model-00007-of-00007.bin", + "model.layers.58.self_attn.v_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.59.input_layernorm.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.59.mlp.down_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.59.mlp.gate_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.59.mlp.up_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.59.post_attention_layernorm.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.59.self_attn.k_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.59.self_attn.o_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.59.self_attn.q_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.59.self_attn.rotary_emb.inv_freq": "pytorch_model-00007-of-00007.bin", + "model.layers.59.self_attn.v_proj.weight": "pytorch_model-00007-of-00007.bin", + "model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.6.mlp.down_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.6.mlp.up_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.6.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00007.bin", + "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.7.mlp.down_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.7.mlp.up_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.7.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00007.bin", + "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.8.input_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.8.mlp.down_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.8.mlp.up_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.8.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00007.bin", + "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00007.bin", + "model.layers.9.input_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.9.mlp.down_proj.weight": "pytorch_model-00002-of-00007.bin", + 
"model.layers.9.mlp.gate_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.9.mlp.up_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.layers.9.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00007.bin", + "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00002-of-00007.bin", + "model.norm.weight": "pytorch_model-00007-of-00007.bin" + } +} diff --git a/special_tokens_map.json b/special_tokens_map.json new file mode 100644 index 0000000..f928b24 --- /dev/null +++ b/special_tokens_map.json @@ -0,0 +1,24 @@ +{ + "bos_token": { + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "pad_token": "", + "unk_token": { + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + } +} diff --git a/tokenizer.model b/tokenizer.model new file mode 100644 index 0000000..6c00c74 --- /dev/null +++ b/tokenizer.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347 +size 499723 diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000..6447b79 --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,35 @@ +{ + "add_bos_token": true, + "add_eos_token": false, + "bos_token": { + "__type": "AddedToken", + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "clean_up_tokenization_spaces": false, + "eos_token": { + "__type": "AddedToken", + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "legacy": true, + "model_max_length": 4096, + "pad_token": null, + "padding_side": "right", + "sp_model_kwargs": {}, + "tokenizer_class": "LlamaTokenizer", + "unk_token": { + "__type": "AddedToken", + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + } +}