Initialize the project; model provided by the ModelHub XC community
Model: CCCCCyx/Llama-3.2-3B-Instruct_slime (Source: Original Platform)
36  .gitattributes  vendored  Normal file
@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text
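Every pattern above routes matching files through Git LFS, so a plain `git clone` of this repository yields small pointer stubs rather than the multi-gigabyte weight shards. A minimal sketch of fetching the real files with `huggingface_hub`, assuming the repo is mirrored on a Hugging Face-compatible hub under the id from the commit header:

```python
# Sketch: pull the LFS-backed artifacts (weight shards, tokenizer.json) locally.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="CCCCCyx/Llama-3.2-3B-Instruct_slime",  # assumed hub id, taken from the commit header
    local_dir="Llama-3.2-3B-Instruct_slime",
)
print(local_dir)
```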
111  LICENSE.txt  Normal file
@@ -0,0 +1,111 @@
LLAMA 3.2 COMMUNITY LICENSE AGREEMENT
Llama 3.2 Version Release Date: September 25, 2024

“Agreement” means the terms and conditions for use, reproduction, distribution
and modification of the Llama Materials set forth herein.

“Documentation” means the specifications, manuals and documentation accompanying Llama 3.2
distributed by Meta at https://llama.meta.com/doc/overview.

“Licensee” or “you” means you, or your employer or any other person or entity (if you are
entering into this Agreement on such person or entity’s behalf), of the age required under
applicable laws, rules or regulations to provide legal consent and that has legal authority
to bind your employer or such other person or entity if you are entering in this Agreement
on their behalf.

“Llama 3.2” means the foundational large language models and software and algorithms, including
machine-learning model code, trained model weights, inference-enabling code, training-enabling code,
fine-tuning enabling code and other elements of the foregoing distributed by Meta at
https://www.llama.com/llama-downloads.

“Llama Materials” means, collectively, Meta’s proprietary Llama 3.2 and Documentation (and
any portion thereof) made available under this Agreement.

“Meta” or “we” means Meta Platforms Ireland Limited (if you are located in or,
if you are an entity, your principal place of business is in the EEA or Switzerland)
and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland).


By clicking “I Accept” below or by using or distributing any portion or element of the Llama Materials,
you agree to be bound by this Agreement.


1. License Rights and Redistribution.

a. Grant of Rights. You are granted a non-exclusive, worldwide,
non-transferable and royalty-free limited license under Meta’s intellectual property or other rights
owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works
of, and make modifications to the Llama Materials.

b. Redistribution and Use.

i. If you distribute or make available the Llama Materials (or any derivative works thereof),
or a product or service (including another AI model) that contains any of them, you shall (A) provide
a copy of this Agreement with any such Llama Materials; and (B) prominently display “Built with Llama”
on a related website, user interface, blogpost, about page, or product documentation. If you use the
Llama Materials or any outputs or results of the Llama Materials to create, train, fine tune, or
otherwise improve an AI model, which is distributed or made available, you shall also include “Llama”
at the beginning of any such AI model name.

ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part
of an integrated end user product, then Section 2 of this Agreement will not apply to you.

iii. You must retain in all copies of the Llama Materials that you distribute the
following attribution notice within a “Notice” text file distributed as a part of such copies:
“Llama 3.2 is licensed under the Llama 3.2 Community License, Copyright © Meta Platforms,
Inc. All Rights Reserved.”

iv. Your use of the Llama Materials must comply with applicable laws and regulations
(including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for
the Llama Materials (available at https://www.llama.com/llama3_2/use-policy), which is hereby
incorporated by reference into this Agreement.

2. Additional Commercial Terms. If, on the Llama 3.2 version release date, the monthly active users
of the products or services made available by or for Licensee, or Licensee’s affiliates,
is greater than 700 million monthly active users in the preceding calendar month, you must request
a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to
exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights.

3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND
RESULTS THEREFROM ARE PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OF ANY KIND, AND META DISCLAIMS
ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES
OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE
FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED
WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.

4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT,
FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN
IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.

5. Intellectual Property.

a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials,
neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates,
except as required for reasonable and customary use in describing and redistributing the Llama Materials or as
set forth in this Section 5(a). Meta hereby grants you a license to use “Llama” (the “Mark”) solely as required
to comply with the last sentence of Section 1.b.i. You will comply with Meta’s brand guidelines (currently accessible
at https://about.meta.com/brand/resources/meta/company-brand/). All goodwill arising out of your use of the Mark
will inure to the benefit of Meta.

b. Subject to Meta’s ownership of Llama Materials and derivatives made by or for Meta, with respect to any
derivative works and modifications of the Llama Materials that are made by you, as between you and Meta,
you are and will be the owner of such derivative works and modifications.

c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or
counterclaim in a lawsuit) alleging that the Llama Materials or Llama 3.2 outputs or results, or any portion
of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable
by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or
claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third
party arising out of or related to your use or distribution of the Llama Materials.

6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access
to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms
and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this
Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3,
4 and 7 shall survive the termination of this Agreement.

7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of
California without regard to choice of law principles, and the UN Convention on Contracts for the International
Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of
any dispute arising out of this Agreement.
67  README.md  Normal file
@@ -0,0 +1,67 @@
---
base_model: meta-llama/Llama-3.2-3B-Instruct
language:
- en
library_name: transformers
license: llama3.2
tags:
- llama-3
- llama
- meta
- facebook
- unsloth
- transformers
---

## ***See [our collection](https://huggingface.co/collections/unsloth/llama-32-66f46afde4ca573864321a22) for all versions of Llama 3.2 including GGUF, 4-bit and original 16-bit formats.***

# Finetune Llama 3.2, Gemma 2, and Mistral 2-5x faster with 70% less memory via Unsloth!

We have a free Google Colab Tesla T4 notebook for Llama 3.2 (3B) here: https://colab.research.google.com/drive/1T5-zKWM_5OD21QHwXHiV9ixTRR7k3iB9?usp=sharing

[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/Discord%20button.png" width="200"/>](https://discord.gg/unsloth)
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)

# unsloth/Llama-3.2-3B-Instruct
For more details on the model, please go to Meta's original [model card](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct).

## ✨ Finetune for Free

All notebooks are **beginner friendly**! Add your dataset, click "Run All", and you'll get a 2x faster finetuned model which can be exported to GGUF or vLLM, or uploaded to Hugging Face.

| Unsloth supports | Free Notebooks | Performance | Memory use |
|-----------------|----------------|-------------|------------|
| **Llama-3.2 (3B)** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) | 2.4x faster | 58% less |
| **Llama-3.2 (11B vision)** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) | 2.4x faster | 58% less |
| **Llama-3.1 (8B)** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) | 2.4x faster | 58% less |
| **Phi-3.5 (mini)** | [▶️ Start on Colab](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) | 2x faster | 50% less |
| **Gemma 2 (9B)** | [▶️ Start on Colab](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) | 2.4x faster | 58% less |
| **Mistral (7B)** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 62% less |
| **DPO - Zephyr** | [▶️ Start on Colab](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 19% less |

- This [conversational notebook](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing) is useful for ShareGPT ChatML / Vicuna templates.
- This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for raw text. This [DPO notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) replicates Zephyr.
- \* Kaggle has 2x T4s, but we use 1. Due to overhead, 1x T4 is 5x faster.

## Special Thanks
A huge thank you to the Meta and Llama team for creating and releasing these models.

## Model Information

The Meta Llama 3.2 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction-tuned generative models in 1B and 3B sizes (text in/text out). The Llama 3.2 instruction-tuned text-only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open-source and closed chat models on common industry benchmarks.

**Model developer:** Meta

**Model Architecture:** Llama 3.2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety.

**Supported languages:** English, German, French, Italian, Portuguese, Hindi, Spanish, and Thai are officially supported. Llama 3.2 has been trained on a broader collection of languages than these 8 supported languages. Developers may fine-tune Llama 3.2 models for languages beyond these supported languages, provided they comply with the Llama 3.2 Community License and the Acceptable Use Policy. Developers are always expected to ensure that their deployments, including those that involve additional languages, are completed safely and responsibly.

**Llama 3.2 family of models:** Token counts refer to pretraining data only. All model versions use Grouped-Query Attention (GQA) for improved inference scalability.

**Model Release Date:** Sept 25, 2024

**Status:** This is a static model trained on an offline dataset. Future versions may be released that improve model capabilities and safety.

**License:** Use of Llama 3.2 is governed by the [Llama 3.2 Community License](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE) (a custom, commercial license agreement).

**Where to send questions or comments about the model:** Instructions on how to provide feedback or comments on the model can be found in the model [README](https://github.com/meta-llama/llama3). For more technical information about generation parameters and recipes for how to use Llama 3.2 in applications, please go [here](https://github.com/meta-llama/llama-recipes).
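As a companion to the README, here is a minimal sketch of loading this checkpoint with `transformers` (the hub id is an assumption carried over from the commit header; the dtype matches the `"torch_dtype": "bfloat16"` entry in config.json below):

```python
# Sketch: load the sharded bfloat16 checkpoint and run a short generation.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "CCCCCyx/Llama-3.2-3B-Instruct_slime"  # assumed hub id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)

inputs = tokenizer("Git LFS stores large files by", return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```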
93  chat_template.jinja  Normal file
@@ -0,0 +1,93 @@
{{- bos_token }}
{%- if custom_tools is defined %}
    {%- set tools = custom_tools %}
{%- endif %}
{%- if not tools_in_user_message is defined %}
    {%- set tools_in_user_message = true %}
{%- endif %}
{%- if not date_string is defined %}
    {%- if strftime_now is defined %}
        {%- set date_string = strftime_now("%d %b %Y") %}
    {%- else %}
        {%- set date_string = "26 Jul 2024" %}
    {%- endif %}
{%- endif %}
{%- if not tools is defined %}
    {%- set tools = none %}
{%- endif %}

{#- This block extracts the system message, so we can slot it into the right place. #}
{%- if messages[0]['role'] == 'system' %}
    {%- set system_message = messages[0]['content']|trim %}
    {%- set messages = messages[1:] %}
{%- else %}
    {%- set system_message = "" %}
{%- endif %}

{#- System message #}
{{- "<|start_header_id|>system<|end_header_id|>\n\n" }}
{%- if tools is not none %}
    {{- "Environment: ipython\n" }}
{%- endif %}
{{- "Cutting Knowledge Date: December 2023\n" }}
{{- "Today Date: " + date_string + "\n\n" }}
{%- if tools is not none and not tools_in_user_message %}
    {{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }}
    {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
    {{- "Do not use variables.\n\n" }}
    {%- for t in tools %}
        {{- t | tojson(indent=4) }}
        {{- "\n\n" }}
    {%- endfor %}
{%- endif %}
{{- system_message }}
{{- "<|eot_id|>" }}

{#- Custom tools are passed in a user message with some extra guidance #}
{%- if tools_in_user_message and not tools is none %}
    {#- Extract the first user message so we can plug it in here #}
    {%- if messages | length != 0 %}
        {%- set first_user_message = messages[0]['content']|trim %}
        {%- set messages = messages[1:] %}
    {%- else %}
        {{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }}
    {%- endif %}
    {{- '<|start_header_id|>user<|end_header_id|>\n\n' -}}
    {{- "Given the following functions, please respond with a JSON for a function call " }}
    {{- "with its proper arguments that best answers the given prompt.\n\n" }}
    {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
    {{- "Do not use variables.\n\n" }}
    {%- for t in tools %}
        {{- t | tojson(indent=4) }}
        {{- "\n\n" }}
    {%- endfor %}
    {{- first_user_message + "<|eot_id|>"}}
{%- endif %}

{%- for message in messages %}
    {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}
        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }}
    {%- elif 'tool_calls' in message %}
        {%- if not message.tool_calls|length == 1 %}
            {{- raise_exception("This model only supports single tool-calls at once!") }}
        {%- endif %}
        {%- set tool_call = message.tool_calls[0].function %}
        {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
        {{- '{"name": "' + tool_call.name + '", ' }}
        {{- '"parameters": ' }}
        {{- tool_call.arguments | tojson }}
        {{- "}" }}
        {{- "<|eot_id|>" }}
    {%- elif message.role == "tool" or message.role == "ipython" %}
        {{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }}
        {%- if message.content is mapping or message.content is iterable %}
            {{- message.content | tojson }}
        {%- else %}
            {{- message.content }}
        {%- endif %}
        {{- "<|eot_id|>" }}
    {%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %}
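This template is normally rendered through `tokenizer.apply_chat_template` rather than by hand; a minimal sketch (hub id assumed as before):

```python
# Sketch: render the Jinja chat template into the Llama 3.2 prompt format.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("CCCCCyx/Llama-3.2-3B-Instruct_slime")  # assumed id
messages = [
    {"role": "system", "content": "You are a concise assistant."},
    {"role": "user", "content": "What does Git LFS do?"},
]
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,  # appends the assistant header, per the template's final block
)
print(prompt)  # <|begin_of_text|><|start_header_id|>system<|end_header_id|> ...
```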
37  config.json  Normal file
@@ -0,0 +1,37 @@
{
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 128000,
  "eos_token_id": 128009,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 3072,
  "initializer_range": 0.02,
  "intermediate_size": 8192,
  "max_position_embeddings": 131072,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 24,
  "num_hidden_layers": 28,
  "num_key_value_heads": 8,
  "pad_token_id": 128004,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": {
    "factor": 32.0,
    "high_freq_factor": 4.0,
    "low_freq_factor": 1.0,
    "original_max_position_embeddings": 8192,
    "rope_type": "llama3"
  },
  "rope_theta": 500000.0,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.52.4",
  "unsloth_fixed": true,
  "use_cache": true,
  "vocab_size": 128256
}
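The `rope_scaling` block is what stretches the original 8192-token pretraining window to the 131072-token `max_position_embeddings`, and `num_attention_heads: 24` against `num_key_value_heads: 8` encodes grouped-query attention with three query heads per KV head. A minimal sketch of inspecting these values (hub id assumed as before):

```python
# Sketch: read architecture hyperparameters straight from config.json.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("CCCCCyx/Llama-3.2-3B-Instruct_slime")  # assumed id
print(config.hidden_size, config.num_hidden_layers)              # 3072 28
print(config.num_attention_heads // config.num_key_value_heads)  # 3 query heads per KV head (GQA)
print(config.rope_scaling)  # {'factor': 32.0, ..., 'rope_type': 'llama3'}
```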
14  generation_config.json  Normal file
@@ -0,0 +1,14 @@
{
  "bos_token_id": 128000,
  "do_sample": true,
  "eos_token_id": [
    128001,
    128008,
    128009
  ],
  "max_length": 131072,
  "pad_token_id": 128004,
  "temperature": 0.6,
  "top_p": 0.9,
  "transformers_version": "4.52.4"
}
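These are the sampling defaults `model.generate()` picks up when the caller passes nothing explicit: nucleus sampling with temperature 0.6 and top-p 0.9, stopping at any of the three EOS ids. A minimal sketch of reading them (hub id assumed as before):

```python
# Sketch: inspect the default decoding settings shipped with the checkpoint.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("CCCCCyx/Llama-3.2-3B-Instruct_slime")  # assumed id
print(gen_cfg.do_sample, gen_cfg.temperature, gen_cfg.top_p)  # True 0.6 0.9
print(gen_cfg.eos_token_id)  # [128001, 128008, 128009]; generation stops at any of these
```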
3  model-00000-of-00002.safetensors  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7eea2001c8d0effe0ce3a0332cf23c61432cfc1d8e737bd5bc40f3468f1e7162
size 5368562064
3  model-00001-of-00002.safetensors  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6821857b08efebaf224fa42cea141ae7ef787b24cf8ea8cfaafe6010c3de34c1
size 1056966984
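Both entries above are Git LFS pointer files, not the shards themselves: three `key value` lines giving the pointer spec version, the SHA-256 of the actual blob, and its byte size (about 5.4 GB and 1.1 GB here). A minimal sketch of parsing one, assuming the file on disk is still a pointer rather than the downloaded blob:

```python
# Sketch: parse a Git LFS pointer file into its three fields.
def read_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = read_lfs_pointer("model-00000-of-00002.safetensors")  # placeholder local path
print(ptr["oid"], int(ptr["size"]))  # sha256:7eea... 5368562064
```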
261  model.safetensors.index.json  Normal file
@@ -0,0 +1,261 @@
{
  "metadata": {
    "total_size": 6425499648
  },
  "weight_map": {
    "model.embed_tokens.weight": "model-00000-of-00002.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.2.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.3.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.4.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.5.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.6.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.7.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.8.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.9.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.17.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.18.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.19.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.20.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.21.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.22.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.23.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.24.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.25.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.26.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.27.input_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.2.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.3.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.4.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.5.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.6.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.7.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.8.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.9.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.16.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.17.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.18.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.19.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.20.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.21.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.22.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.23.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.24.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.25.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.26.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.layers.27.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
    "model.norm.weight": "model-00000-of-00002.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.2.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.3.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.4.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.5.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.6.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.7.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.8.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.9.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.16.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.17.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.18.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.19.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.20.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.21.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.22.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.23.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.24.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.25.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.26.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.27.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.2.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.2.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.2.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.3.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.3.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.3.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.4.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.4.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.4.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.5.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.5.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.5.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.6.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.6.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.6.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.7.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.7.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.7.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.8.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.8.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.8.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.9.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.9.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.9.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.10.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.10.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.10.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.11.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.11.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.11.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.12.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.12.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.12.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.13.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.13.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.13.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.14.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.14.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.14.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.15.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.15.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.15.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.16.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.16.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.16.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.17.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.17.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.17.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.18.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.18.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.18.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.19.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.19.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.19.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.20.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.20.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.20.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.21.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.21.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.21.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.22.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.22.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.22.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.23.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.23.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.23.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.24.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.24.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.24.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.25.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.25.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.25.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.26.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.26.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.26.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.27.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.27.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.27.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.2.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.2.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.3.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.3.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.4.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.4.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.5.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.5.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.6.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.6.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.7.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.7.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.8.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.8.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.9.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.9.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.15.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.15.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.16.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.16.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.17.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.17.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.18.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.18.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.19.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.19.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.20.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.20.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.21.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.21.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.22.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.22.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.23.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.23.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.24.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.24.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.25.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.25.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.26.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.26.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.27.mlp.gate_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.27.mlp.up_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.2.mlp.down_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.3.mlp.down_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.4.mlp.down_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.5.mlp.down_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.6.mlp.down_proj.weight": "model-00000-of-00002.safetensors",
    "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.22.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.23.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.24.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.25.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.26.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.27.mlp.down_proj.weight": "model-00001-of-00002.safetensors"
  }
}
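`transformers` consumes this index automatically when loading, but the `weight_map` can also be used directly with `safetensors` to read a single tensor without materializing the whole model. A minimal sketch, assuming the shards have been downloaded next to the index:

```python
# Sketch: resolve one tensor through weight_map and read it lazily from its shard.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.embed_tokens.weight"
shard = index["weight_map"][name]  # -> "model-00000-of-00002.safetensors"
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)
print(tensor.shape)  # torch.Size([128256, 3072]): vocab_size x hidden_size
```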
23  special_tokens_map.json  Normal file
@@ -0,0 +1,23 @@
{
  "bos_token": {
    "content": "<|begin_of_text|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|eot_id|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|finetune_right_pad_id|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
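Note that the pad token (`<|finetune_right_pad_id|>`, id 128004) is deliberately distinct from the EOS token, matching `pad_token_id` in config.json and generation_config.json above. A minimal sketch confirming the mapping on a loaded tokenizer (hub id assumed as before):

```python
# Sketch: confirm the special-token mapping exposed by the tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("CCCCCyx/Llama-3.2-3B-Instruct_slime")  # assumed id
print(tok.bos_token, tok.bos_token_id)  # <|begin_of_text|> 128000
print(tok.eos_token, tok.eos_token_id)  # <|eot_id|> 128009
print(tok.pad_token, tok.pad_token_id)  # <|finetune_right_pad_id|> 128004
```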
3  tokenizer.json  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b
size 17209920
2067  tokenizer_config.json  Normal file
File diff suppressed because it is too large