Initialize project; model provided by the ModelHub XC community
Model: RthItalia/NanoLLM-Qwen2.5-14B-v3.1 Source: Original Platform
38
.gitattributes
vendored
Normal file
@@ -0,0 +1,38 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text

quantized_modules.pt filter=lfs diff=lfs merge=lfs -text
nano_compact/tokenizer.json filter=lfs diff=lfs merge=lfs -text
202
LICENSE
Normal file
@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2024 Alibaba Cloud

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
28
README.md
Normal file
@@ -0,0 +1,28 @@
---
license: other
library_name: transformers
base_model: Qwen/Qwen2.5-14B-Instruct
tags:
- nanollm
- qwen2.5
- safetensors
- text-generation
---

# NanoLLM Qwen2.5-14B-Instruct v3.1

The compact, self-contained NanoLLM format is in `nano_compact/`.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "RthItalia/NanoLLM-Qwen2.5-14B-v3.1"
tokenizer = AutoTokenizer.from_pretrained(repo_id, subfolder="nano_compact", use_fast=True)
model = AutoModelForCausalLM.from_pretrained(repo_id, subfolder="nano_compact", trust_remote_code=True, device_map="auto")
```

Validation against the 8-bit reference:
- avg cosine: 0.98984375
- min cosine: 0.9765625
- gate: avg >= 0.985

`nano_compact/model.safetensors` contains the Nano quantized tensors and does not require downloading the Qwen base weights.
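A short generation sketch to follow the loading snippet above; it assumes only the standard `transformers` chat-template and generate APIs, with sampling left to the defaults in `generation_config.json`:

```python
# Sketch, not part of the original commit: generate with the bundled ChatML template.
messages = [{"role": "user", "content": "Summarize Git LFS in one sentence."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(input_ids, max_new_tokens=64)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```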
27
config.json
Normal file
@@ -0,0 +1,27 @@
{
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 5120,
  "initializer_range": 0.02,
  "intermediate_size": 13824,
  "max_position_embeddings": 32768,
  "max_window_layers": 70,
  "model_type": "qwen2",
  "num_attention_heads": 40,
  "num_hidden_layers": 48,
  "num_key_value_heads": 8,
  "rms_norm_eps": 1e-06,
  "rope_theta": 1000000.0,
  "sliding_window": 131072,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.43.1",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 152064
}
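For orientation, the fields above describe a 48-layer transformer with grouped-query attention; they can be read back through the standard `AutoConfig` API (a sketch, assuming the repo id from the README):

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("RthItalia/NanoLLM-Qwen2.5-14B-v3.1")
# 48 layers of 5120-dim hidden states; 40 query heads share 8 KV heads (GQA).
print(cfg.num_hidden_layers, cfg.hidden_size, cfg.num_attention_heads, cfg.num_key_value_heads)
```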
14
generation_config.json
Normal file
@@ -0,0 +1,14 @@
{
  "bos_token_id": 151643,
  "pad_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "repetition_penalty": 1.05,
  "temperature": 0.7,
  "top_p": 0.8,
  "top_k": 20,
  "transformers_version": "4.37.0"
}
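When the model loads, these values populate `model.generation_config` and become the defaults for `generate`; any of them can be overridden per call. A minimal sketch:

```python
# Sketch: per-call override of the defaults above (values are illustrative).
batch = tokenizer("Hello", return_tensors="pt").to(model.device)
output = model.generate(**batch, temperature=0.9, top_p=0.95, max_new_tokens=32)
```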
119
load_artifact.py
Normal file
@@ -0,0 +1,119 @@
"""NANO-v3.1 universal loader (inference only)."""
import os
import json
from pathlib import Path

import torch
import torch.nn as nn
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig


class TrueQuantLinear(nn.Module):
    """Linear layer whose rows are split into a "protected" group (pq/ps/pi) and a
    "degraded" group (dq/ds/di): int8 weights, fp16 per-row scales, and the output
    indices each group scatters back to."""

    def __init__(self, pq, ps, pi, dq, ds, di, out_features, bias=None, bits=8, device="cuda:0"):
        super().__init__()
        self.out_features = out_features
        self.bits = int(bits)
        self.register_buffer("pq", pq.to(device=device, dtype=torch.int8))
        self.register_buffer("ps", ps.to(device=device, dtype=torch.float16))
        self.register_buffer("pi", pi.to(device=device, dtype=torch.long))
        self.register_buffer("dq", dq.to(device=device, dtype=torch.int8))
        self.register_buffer("ds", ds.to(device=device, dtype=torch.float16))
        self.register_buffer("di", di.to(device=device, dtype=torch.long))
        if bias is not None:
            self.register_buffer("bias", bias.to(device=device, dtype=torch.float16))
        else:
            self.bias = None

    def forward(self, x):
        d, dt = x.device, x.dtype
        f = x.to(torch.float16).reshape(-1, x.shape[-1])
        o = torch.zeros(f.shape[0], self.out_features, dtype=torch.float16, device=d)
        # Dequantize each row group on the fly and scatter its outputs back to the
        # positions recorded in pi / di.
        if self.pq.shape[0] > 0:
            o.index_copy_(-1, self.pi.to(d), f @ (self.pq.to(d, torch.float16) * self.ps.to(d).unsqueeze(1)).t())
        if self.dq.shape[0] > 0:
            o.index_copy_(-1, self.di.to(d), f @ (self.dq.to(d, torch.float16) * self.ds.to(d).unsqueeze(1)).t())
        if self.bias is not None:
            o = o + self.bias.to(d)
        return o.reshape(*x.shape[:-1], self.out_features).to(dt)


def _set(root, name, value):
    # Replace the attribute (or list entry) at a dotted module path,
    # e.g. "model.layers.0.mlp.down_proj".
    parts = name.split(".")
    parent = root
    for p in parts[:-1]:
        parent = parent[int(p)] if p.isdigit() else getattr(parent, p)
    if parts[-1].isdigit():
        parent[int(parts[-1])] = value
    else:
        setattr(parent, parts[-1], value)


def get_module(root, name):
    cur = root
    for p in name.split("."):
        cur = cur[int(p)] if p.isdigit() else getattr(cur, p)
    return cur


def load_artifact(artifact_dir):
    d = Path(artifact_dir)
    spec = json.loads((d / "spec.json").read_text("utf-8"))
    state = torch.load(d / "quantized_modules.pt", map_location="cpu")

    # Modules not covered by the Nano artifact stay in the bitsandbytes base
    # quantization: 8-bit by default, 4-bit NF4 when NANO_LOAD_4BIT is set.
    use_4bit = os.getenv("NANO_LOAD_4BIT", "0").strip().lower() in {"1", "true", "yes", "on"}
    qcfg = (
        BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.float16,
        )
        if use_4bit
        else BitsAndBytesConfig(load_in_8bit=True)
    )

    model = AutoModelForCausalLM.from_pretrained(
        str(d),
        quantization_config=qcfg,
        device_map="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained(str(d), use_fast=True)
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token = tokenizer.eos_token

    for name, s in state.items():
        dev = next(get_module(model, name).parameters()).device
        bits = s["bits"]
        if "deg_q_packed" in s:
            pk, pad = s["deg_q_packed"], s["pad"]
            if bits == 2:
                # Unpack four 2-bit codes per byte, drop padding, recenter to [-1, 2].
                dq = torch.stack([pk & 3, (pk >> 2) & 3, (pk >> 4) & 3, (pk >> 6) & 3], dim=-1).view(pk.shape[0], -1)
                if pad > 0:
                    dq = dq[:, :-pad]
                dq = dq.to(torch.int8) - 1
            else:
                # Unpack two 4-bit codes per byte, drop padding, recenter to [-7, 8].
                dq = torch.stack([pk & 15, (pk >> 4) & 15], dim=-1).view(pk.shape[0], -1)
                if pad > 0:
                    dq = dq[:, :-pad]
                dq = dq.to(torch.int8) - 7
        else:
            dq = s.get("deg_q", torch.zeros(0, dtype=torch.int8))

        _set(
            model,
            name,
            TrueQuantLinear(
                s["prot_q"],
                s["prot_scale"],
                s["prot_idx"],
                dq,
                s["deg_scale"],
                s["deg_idx"],
                s["out_features"],
                s.get("bias"),
                bits,
                device=str(dev),
            ),
        )
    return model.eval(), tokenizer, spec
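A possible invocation of `load_artifact`, assuming the directory holds `spec.json` and `quantized_modules.pt` as in this repository (a sketch, not part of the commit):

```python
import torch
from load_artifact import load_artifact

model, tokenizer, spec = load_artifact(".")  # repo root with spec.json + quantized_modules.pt
print(spec["format"])  # "nano-v3.1-multi"
batch = tokenizer("The capital of Italy is", return_tensors="pt").to(model.device)
with torch.inference_mode():
    out = model.generate(**batch, max_new_tokens=16)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```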
151387
merges.txt
Normal file
File diff suppressed because it is too large
118
modeling_nanollm.py
Normal file
@@ -0,0 +1,118 @@
import torch
import torch.nn as nn
from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
from transformers.models.qwen2.modeling_qwen2 import Qwen2ForCausalLM


class NanoInt8Linear(nn.Module):
    # Int8 weight matrix with one fp16 scale per output row.
    def __init__(self, in_features, out_features, has_bias=False):
        super().__init__()
        self.in_features = int(in_features)
        self.out_features = int(out_features)
        self.has_bias = bool(has_bias)
        self.register_buffer("q", torch.empty((self.out_features, self.in_features), dtype=torch.int8))
        self.register_buffer("scale", torch.empty((self.out_features,), dtype=torch.float16))
        if self.has_bias:
            self.register_buffer("bias", torch.empty((self.out_features,), dtype=torch.float16))

    def forward(self, x):
        dt = x.dtype
        f = x.to(torch.float16).reshape(-1, x.shape[-1])
        w = self.q.to(f.device, torch.float16) * self.scale.to(f.device).unsqueeze(1)
        y = f @ w.t()
        if self.has_bias:
            y = y + self.bias.to(f.device)
        return y.reshape(*x.shape[:-1], self.out_features).to(dt)


class NanoTrueQuantLinear(nn.Module):
    # Rows are split into "protected" (prot_*) and "degraded" (deg_*) groups, each
    # with its own int8 weights, fp16 scales, and target output indices.
    def __init__(self, in_features, out_features, prot_rows, deg_rows, has_bias=False):
        super().__init__()
        self.in_features = int(in_features)
        self.out_features = int(out_features)
        self.has_bias = bool(has_bias)
        self.register_buffer("prot_q", torch.empty((prot_rows, self.in_features), dtype=torch.int8))
        self.register_buffer("prot_scale", torch.empty((prot_rows,), dtype=torch.float16))
        self.register_buffer("prot_idx", torch.empty((prot_rows,), dtype=torch.long))
        self.register_buffer("deg_q", torch.empty((deg_rows, self.in_features), dtype=torch.int8))
        self.register_buffer("deg_scale", torch.empty((deg_rows,), dtype=torch.float16))
        self.register_buffer("deg_idx", torch.empty((deg_rows,), dtype=torch.long))
        if self.has_bias:
            self.register_buffer("bias", torch.empty((self.out_features,), dtype=torch.float16))

    def forward(self, x):
        dt = x.dtype
        f = x.to(torch.float16).reshape(-1, x.shape[-1])
        y = torch.zeros((f.shape[0], self.out_features), dtype=torch.float16, device=f.device)
        if self.prot_q.shape[0] > 0:
            w = self.prot_q.to(f.device, torch.float16) * self.prot_scale.to(f.device).unsqueeze(1)
            y.index_copy_(-1, self.prot_idx.to(f.device), f @ w.t())
        if self.deg_q.shape[0] > 0:
            w = self.deg_q.to(f.device, torch.float16) * self.deg_scale.to(f.device).unsqueeze(1)
            y.index_copy_(-1, self.deg_idx.to(f.device), f @ w.t())
        if self.has_bias:
            y = y + self.bias.to(f.device)
        return y.reshape(*x.shape[:-1], self.out_features).to(dt)


class NanoEmbedding(nn.Module):
    # Int8 embedding table with one fp16 scale per row (per token id).
    def __init__(self, num_embeddings, embedding_dim):
        super().__init__()
        self.num_embeddings = int(num_embeddings)
        self.embedding_dim = int(embedding_dim)
        self.register_buffer("q", torch.empty((self.num_embeddings, self.embedding_dim), dtype=torch.int8))
        self.register_buffer("scale", torch.empty((self.num_embeddings,), dtype=torch.float16))

    def forward(self, input_ids):
        return self.q[input_ids].to(torch.float16) * self.scale[input_ids].to(torch.float16).unsqueeze(-1)


class NanoTiedLMHead(nn.Module):
    # LM head that reuses the quantized embedding table (weight tying by copy).
    def __init__(self, embedding):
        super().__init__()
        self.register_buffer("q", embedding.q.detach().clone())
        self.register_buffer("scale", embedding.scale.detach().clone())

    def forward(self, x):
        w = self.q.to(x.device, torch.float16) * self.scale.to(x.device).unsqueeze(1)
        return x.to(torch.float16) @ w.t()


def _set_module(root, name, module):
    cur = root
    parts = name.split(".")
    for p in parts[:-1]:
        cur = cur[int(p)] if p.isdigit() else getattr(cur, p)
    setattr(cur, parts[-1], module)


class NanoQwenForCausalLM(Qwen2ForCausalLM):
    config_class = Qwen2Config

    def tie_weights(self, *args, **kwargs):
        # Quantized buffers must not be re-tied by transformers.
        return None

    def mark_tied_weights_as_initialized(self, *args, **kwargs):
        return None

    def __init__(self, config):
        config.tie_word_embeddings = False
        super().__init__(config)
        self.config.tie_word_embeddings = False
        self._tied_weights_keys = []
        self.all_tied_weights_keys = {}
        # config.nanollm_modules maps dotted module paths to quantized-module specs.
        mods = getattr(config, "nanollm_modules", {})
        for name, spec in mods.items():
            kind = spec["kind"]
            if kind == "embedding":
                mod = NanoEmbedding(spec["num_embeddings"], spec["embedding_dim"])
            elif kind == "int8_linear":
                mod = NanoInt8Linear(spec["in_features"], spec["out_features"], spec.get("has_bias", False))
            elif kind == "truequant_linear":
                mod = NanoTrueQuantLinear(
                    spec["in_features"], spec["out_features"],
                    spec["prot_rows"], spec["deg_rows"],
                    spec.get("has_bias", False),
                )
            else:
                raise ValueError(f"Unknown Nano module kind: {kind}")
            _set_module(self, name, mod)
        if "lm_head" not in mods and isinstance(self.model.embed_tokens, NanoEmbedding):
            self.lm_head = NanoTiedLMHead(self.model.embed_tokens)
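For intuition about the `q`/`scale` buffers above: `forward` dequantizes with `q.half() * scale.unsqueeze(1)`, which is per-output-row absmax int8. The sketch below shows one plausible way to fill such buffers from a float `nn.Linear`; it is an illustration, not the exporter that produced this checkpoint:

```python
import torch
import torch.nn as nn

def rowwise_int8(linear: nn.Linear):
    # Per-row absmax scaling: each output row gets its own fp16 scale.
    w = linear.weight.detach().float()                   # (out_features, in_features)
    scale = w.abs().amax(dim=1).clamp(min=1e-8) / 127.0
    q = torch.round(w / scale.unsqueeze(1)).clamp(-127, 127).to(torch.int8)
    return q, scale.half()

lin = nn.Linear(512, 512, bias=False)
q, scale = rowwise_int8(lin)
recon = q.half() * scale.unsqueeze(1)  # same dequantization as NanoInt8Linear.forward
```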
54
nano_compact/chat_template.jinja
Normal file
@@ -0,0 +1,54 @@
{%- if tools %}
    {{- '<|im_start|>system\n' }}
    {%- if messages[0]['role'] == 'system' %}
        {{- messages[0]['content'] }}
    {%- else %}
        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}
    {%- endif %}
    {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
    {%- for tool in tools %}
        {{- "\n" }}
        {{- tool | tojson }}
    {%- endfor %}
    {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
    {%- if messages[0]['role'] == 'system' %}
        {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
    {%- else %}
        {{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }}
    {%- endif %}
{%- endif %}
{%- for message in messages %}
    {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
        {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
    {%- elif message.role == "assistant" %}
        {{- '<|im_start|>' + message.role }}
        {%- if message.content %}
            {{- '\n' + message.content }}
        {%- endif %}
        {%- for tool_call in message.tool_calls %}
            {%- if tool_call.function is defined %}
                {%- set tool_call = tool_call.function %}
            {%- endif %}
            {{- '\n<tool_call>\n{"name": "' }}
            {{- tool_call.name }}
            {{- '", "arguments": ' }}
            {{- tool_call.arguments | tojson }}
            {{- '}\n</tool_call>' }}
        {%- endfor %}
        {{- '<|im_end|>\n' }}
    {%- elif message.role == "tool" %}
        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
            {{- '<|im_start|>user' }}
        {%- endif %}
        {{- '\n<tool_response>\n' }}
        {{- message.content }}
        {{- '\n</tool_response>' }}
        {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
            {{- '<|im_end|>\n' }}
        {%- endif %}
    {%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|im_start|>assistant\n' }}
{%- endif %}
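To exercise the tool branch of this template, `apply_chat_template` accepts a `tools` list in recent `transformers` releases (worth verifying against your installed version); the tool schema below is a made-up example:

```python
messages = [{"role": "user", "content": "What's the weather in Rome?"}]
tools = [{
    "type": "function",
    "function": {  # hypothetical tool, for illustration only
        "name": "get_weather",
        "description": "Look up current weather for a city.",
        "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
    },
}]
prompt = tokenizer.apply_chat_template(
    messages, tools=tools, add_generation_prompt=True, tokenize=False
)
print(prompt)  # the system block now lists the <tools> signatures
```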
2231
nano_compact/config.json
Normal file
File diff suppressed because it is too large
3
nano_compact/model.safetensors
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d052d5b20bebde4836c9559d5c9baf6b65ad2af02bb10da9d23a7416a10e6970
size 14779293592
118
nano_compact/modeling_nanollm.py
Normal file
@@ -0,0 +1,118 @@
import torch
import torch.nn as nn
from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
from transformers.models.qwen2.modeling_qwen2 import Qwen2ForCausalLM


class NanoInt8Linear(nn.Module):
    # Int8 weight matrix with one fp16 scale per output row.
    def __init__(self, in_features, out_features, has_bias=False):
        super().__init__()
        self.in_features = int(in_features)
        self.out_features = int(out_features)
        self.has_bias = bool(has_bias)
        self.register_buffer("q", torch.empty((self.out_features, self.in_features), dtype=torch.int8))
        self.register_buffer("scale", torch.empty((self.out_features,), dtype=torch.float16))
        if self.has_bias:
            self.register_buffer("bias", torch.empty((self.out_features,), dtype=torch.float16))

    def forward(self, x):
        dt = x.dtype
        f = x.to(torch.float16).reshape(-1, x.shape[-1])
        w = self.q.to(f.device, torch.float16) * self.scale.to(f.device).unsqueeze(1)
        y = f @ w.t()
        if self.has_bias:
            y = y + self.bias.to(f.device)
        return y.reshape(*x.shape[:-1], self.out_features).to(dt)


class NanoTrueQuantLinear(nn.Module):
    # Rows are split into "protected" (prot_*) and "degraded" (deg_*) groups, each
    # with its own int8 weights, fp16 scales, and target output indices.
    def __init__(self, in_features, out_features, prot_rows, deg_rows, has_bias=False):
        super().__init__()
        self.in_features = int(in_features)
        self.out_features = int(out_features)
        self.has_bias = bool(has_bias)
        self.register_buffer("prot_q", torch.empty((prot_rows, self.in_features), dtype=torch.int8))
        self.register_buffer("prot_scale", torch.empty((prot_rows,), dtype=torch.float16))
        self.register_buffer("prot_idx", torch.empty((prot_rows,), dtype=torch.long))
        self.register_buffer("deg_q", torch.empty((deg_rows, self.in_features), dtype=torch.int8))
        self.register_buffer("deg_scale", torch.empty((deg_rows,), dtype=torch.float16))
        self.register_buffer("deg_idx", torch.empty((deg_rows,), dtype=torch.long))
        if self.has_bias:
            self.register_buffer("bias", torch.empty((self.out_features,), dtype=torch.float16))

    def forward(self, x):
        dt = x.dtype
        f = x.to(torch.float16).reshape(-1, x.shape[-1])
        y = torch.zeros((f.shape[0], self.out_features), dtype=torch.float16, device=f.device)
        if self.prot_q.shape[0] > 0:
            w = self.prot_q.to(f.device, torch.float16) * self.prot_scale.to(f.device).unsqueeze(1)
            y.index_copy_(-1, self.prot_idx.to(f.device), f @ w.t())
        if self.deg_q.shape[0] > 0:
            w = self.deg_q.to(f.device, torch.float16) * self.deg_scale.to(f.device).unsqueeze(1)
            y.index_copy_(-1, self.deg_idx.to(f.device), f @ w.t())
        if self.has_bias:
            y = y + self.bias.to(f.device)
        return y.reshape(*x.shape[:-1], self.out_features).to(dt)


class NanoEmbedding(nn.Module):
    # Int8 embedding table with one fp16 scale per row (per token id).
    def __init__(self, num_embeddings, embedding_dim):
        super().__init__()
        self.num_embeddings = int(num_embeddings)
        self.embedding_dim = int(embedding_dim)
        self.register_buffer("q", torch.empty((self.num_embeddings, self.embedding_dim), dtype=torch.int8))
        self.register_buffer("scale", torch.empty((self.num_embeddings,), dtype=torch.float16))

    def forward(self, input_ids):
        return self.q[input_ids].to(torch.float16) * self.scale[input_ids].to(torch.float16).unsqueeze(-1)


class NanoTiedLMHead(nn.Module):
    # LM head that reuses the quantized embedding table (weight tying by copy).
    def __init__(self, embedding):
        super().__init__()
        self.register_buffer("q", embedding.q.detach().clone())
        self.register_buffer("scale", embedding.scale.detach().clone())

    def forward(self, x):
        w = self.q.to(x.device, torch.float16) * self.scale.to(x.device).unsqueeze(1)
        return x.to(torch.float16) @ w.t()


def _set_module(root, name, module):
    cur = root
    parts = name.split(".")
    for p in parts[:-1]:
        cur = cur[int(p)] if p.isdigit() else getattr(cur, p)
    setattr(cur, parts[-1], module)


class NanoQwenForCausalLM(Qwen2ForCausalLM):
    config_class = Qwen2Config

    def tie_weights(self, *args, **kwargs):
        # Quantized buffers must not be re-tied by transformers.
        return None

    def mark_tied_weights_as_initialized(self, *args, **kwargs):
        return None

    def __init__(self, config):
        config.tie_word_embeddings = False
        super().__init__(config)
        self.config.tie_word_embeddings = False
        self._tied_weights_keys = []
        self.all_tied_weights_keys = {}
        # config.nanollm_modules maps dotted module paths to quantized-module specs.
        mods = getattr(config, "nanollm_modules", {})
        for name, spec in mods.items():
            kind = spec["kind"]
            if kind == "embedding":
                mod = NanoEmbedding(spec["num_embeddings"], spec["embedding_dim"])
            elif kind == "int8_linear":
                mod = NanoInt8Linear(spec["in_features"], spec["out_features"], spec.get("has_bias", False))
            elif kind == "truequant_linear":
                mod = NanoTrueQuantLinear(
                    spec["in_features"], spec["out_features"],
                    spec["prot_rows"], spec["deg_rows"],
                    spec.get("has_bias", False),
                )
            else:
                raise ValueError(f"Unknown Nano module kind: {kind}")
            _set_module(self, name, mod)
        if "lm_head" not in mods and isinstance(self.model.embed_tokens, NanoEmbedding):
            self.lm_head = NanoTiedLMHead(self.model.embed_tokens)
6
nano_compact/nano_compact_spec.json
Normal file
@@ -0,0 +1,6 @@
{
  "format": "compact-safetensors-v1",
  "base_model_id": "Qwen/Qwen2.5-14B-Instruct",
  "artifact_dir": "/workspace/nano_rebuild/runs_14b/099/final_artifact_Qwen2.5-14B-Instruct",
  "requires_trust_remote_code": true
}
3
nano_compact/tokenizer.json
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3fd169731d2cbde95e10bf356d66d5997fd885dd8dbb6fb4684da3f23b2585d8
size 11421892
30
nano_compact/tokenizer_config.json
Normal file
@@ -0,0 +1,30 @@
{
  "add_prefix_space": false,
  "backend": "tokenizers",
  "bos_token": null,
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "is_local": true,
  "local_files_only": false,
  "model_max_length": 131072,
  "pad_token": "<|endoftext|>",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
3
quantized_modules.pt
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a8498fb49136fc649878ca2d8f4bc7131b6d85548c7f63dacac76e6bd0873d74
size 1987647762
27
spec.json
Normal file
@@ -0,0 +1,27 @@
{
  "format": "nano-v3.1-multi",
  "base_model_id": "Qwen/Qwen2.5-14B-Instruct",
  "hidden_layers": 48,
  "locked_count": 76,
  "pending_8bit": 260,
  "elapsed_seconds": 5959,
  "build_reference_mode": "8bit",
  "reference_scope": "original_baseline",
  "pending_policy": "leave_in_base_8bit",
  "pruned_from": "/kaggle/working/final_artifact_Qwen2.5-14B-Instruct",
  "pruned_strategy": "remove_last_locked",
  "pruned_removed_count": 9,
  "pruned_removed_modules": [
    "model.layers.42.mlp.down_proj",
    "model.layers.45.mlp.gate_proj",
    "model.layers.46.mlp.down_proj",
    "model.layers.28.self_attn.v_proj",
    "model.layers.29.mlp.down_proj",
    "model.layers.34.mlp.gate_proj",
    "model.layers.36.mlp.up_proj",
    "model.layers.42.self_attn.k_proj",
    "model.layers.46.self_attn.o_proj"
  ],
  "self_contained": true,
  "base_model_local_subdir": "."
}
303282
tokenizer.json
Normal file
File diff suppressed because it is too large
207
tokenizer_config.json
Normal file
@@ -0,0 +1,207 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151646": {
      "content": "<|object_ref_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151647": {
      "content": "<|object_ref_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151648": {
      "content": "<|box_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151649": {
      "content": "<|box_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151650": {
      "content": "<|quad_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151651": {
      "content": "<|quad_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151652": {
      "content": "<|vision_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151653": {
      "content": "<|vision_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151654": {
      "content": "<|vision_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151655": {
      "content": "<|image_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151656": {
      "content": "<|video_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151657": {
      "content": "<tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151658": {
      "content": "</tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151659": {
      "content": "<|fim_prefix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151660": {
      "content": "<|fim_middle|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151661": {
      "content": "<|fim_suffix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151662": {
      "content": "<|fim_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151663": {
      "content": "<|repo_name|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151664": {
      "content": "<|file_sep|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "bos_token": null,
  "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "model_max_length": 131072,
  "pad_token": "<|endoftext|>",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
1
vocab.json
Normal file
File diff suppressed because one or more lines are too long