Initialize project; model provided by the ModelHub XC community

Model: lzkhhh/ITDR-Qwen2.5-7B-Instruct
Source: Original Platform
ModelHub XC
2026-04-10 22:40:58 +08:00
commit cd71f35520
22 changed files with 909601 additions and 0 deletions

.gitattributes vendored Normal file (47 lines)

@@ -0,0 +1,47 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*.tfevents* filter=lfs diff=lfs merge=lfs -text
*.db* filter=lfs diff=lfs merge=lfs -text
*.ark* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.gguf* filter=lfs diff=lfs merge=lfs -text
*.ggml filter=lfs diff=lfs merge=lfs -text
*.llamafile* filter=lfs diff=lfs merge=lfs -text
*.pt2 filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
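
These rules route model weights and other large binaries through Git LFS, so the repository itself holds only small pointer files (see the three-line pointers further down). A minimal sketch of testing a path against a few of these globs; `fnmatch` is only a rough stand-in for gitattributes matching, and the pattern subset is abridged:

```python
from fnmatch import fnmatch

# Abridged subset of the LFS patterns above (illustration only).
LFS_PATTERNS = ["*.safetensors", "*.bin", "*.gguf*", "*.onnx", "*.pt"]

def is_lfs_tracked(path: str) -> bool:
    """True if the basename matches any LFS-tracked glob (bare patterns
    in .gitattributes match at any directory depth)."""
    name = path.rsplit("/", 1)[-1]
    return any(fnmatch(name, pat) for pat in LFS_PATTERNS)

assert is_lfs_tracked("model-00001-of-00009.safetensors")
assert not is_lfs_tracked("config.json")  # small JSON stays a regular blob
```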

Modelfile Normal file (14 lines)

@@ -0,0 +1,14 @@
# Ollama Modelfile auto-generated by LLaMA-Factory
FROM .
TEMPLATE """{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}{{ range .Messages }}{{ if eq .Role "user" }}<|im_start|>user
{{ .Content }}<|im_end|>
<|im_start|>assistant
{{ else if eq .Role "assistant" }}{{ .Content }}<|im_end|>
{{ end }}{{ end }}"""
PARAMETER stop "<|im_end|>"
PARAMETER num_ctx 4096
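
The template wraps each turn in Qwen's <|im_start|>/<|im_end|> markers and caps the context window at 4096 tokens. A hedged sketch of building and querying the model locally, assuming Ollama and its Python client are installed; the model name itdr-qwen2.5-7b is arbitrary:

```python
import subprocess

import ollama  # pip install ollama

# Register the local weights with Ollama using this Modelfile
# (equivalent to: ollama create itdr-qwen2.5-7b -f Modelfile).
subprocess.run(["ollama", "create", "itdr-qwen2.5-7b", "-f", "Modelfile"], check=True)

reply = ollama.chat(
    model="itdr-qwen2.5-7b",
    messages=[{"role": "user", "content": "Recommend three movies for a fan of Inception."}],
)
print(reply["message"]["content"])
```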

README.md Normal file (27 lines)

@@ -0,0 +1,27 @@
---
license: mit
language:
- en
tasks:
- question-answering
- text-generation
- text-classification
- nli
- feature-extraction
- entity-typing
frameworks: PyTorch
base_model_relation: finetune
metrics:
- bleu
- accuracy
base_model:
- Qwen/Qwen2.5-7B-Instruct
---
## ITDR: An Instruction Tuning Dataset for Enhancing Large Language Models in Recommendations
## Introduction
Large language models (LLMs) have demonstrated outstanding performance on natural language processing tasks. In recommendation systems, however, the structural differences between user behavior data and natural language make it difficult for LLMs to model the associations between user preferences and items. Prompt-based methods can produce recommendation results, but their limited understanding of recommendation tasks constrains performance. To address this gap, we construct ITDR, an instruction-tuning dataset covering 7 subtasks under two core root tasks: user-item interaction and user-item understanding. The dataset integrates data from 13 public recommendation datasets, is built with manually crafted standardized templates, and comprises approximately 200,000 instances. Experimental results demonstrate that ITDR significantly enhances the performance of mainstream open-source LLMs such as GLM-4, Qwen2.5, Qwen2.5-Instruct, and LLaMA-3.2 on recommendation tasks. We further analyze the correlations between tasks, explore how task descriptions and data scale affect instruction-tuning effectiveness, and run comparative experiments against closed-source LLMs with substantially more parameters.
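
For reference, a minimal generation sketch with Hugging Face Transformers; the repo id comes from the commit header above, and the prompt, dtype, and token budget are illustrative:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "lzkhhh/ITDR-Qwen2.5-7B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [{
    "role": "user",
    "content": "A user watched Inception, Interstellar, and The Matrix. "
               "Recommend five similar movies.",
}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
out = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
```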

added_tokens.json Normal file (24 lines)

@@ -0,0 +1,24 @@
{
"</tool_call>": 151658,
"<tool_call>": 151657,
"<|box_end|>": 151649,
"<|box_start|>": 151648,
"<|endoftext|>": 151643,
"<|file_sep|>": 151664,
"<|fim_middle|>": 151660,
"<|fim_pad|>": 151662,
"<|fim_prefix|>": 151659,
"<|fim_suffix|>": 151661,
"<|im_end|>": 151645,
"<|im_start|>": 151644,
"<|image_pad|>": 151655,
"<|object_ref_end|>": 151647,
"<|object_ref_start|>": 151646,
"<|quad_end|>": 151651,
"<|quad_start|>": 151650,
"<|repo_name|>": 151663,
"<|video_pad|>": 151656,
"<|vision_end|>": 151653,
"<|vision_pad|>": 151654,
"<|vision_start|>": 151652
}
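
These entries extend the base vocabulary with Qwen's control tokens; the ids sit near the top of the 152064-slot embedding table declared in config.json. A quick consistency check, assuming the file and tokenizer are available in a local checkout:

```python
import json

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # directory with this checkout
with open("added_tokens.json") as f:
    added = json.load(f)

# Every added token should round-trip to the id recorded here.
for token, token_id in added.items():
    assert tokenizer.convert_tokens_to_ids(token) == token_id

print(tokenizer.convert_tokens_to_ids("<|im_end|>"))  # 151645, the eos id below
```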

config.json Normal file (28 lines)

@@ -0,0 +1,28 @@
{
"architectures": [
"Qwen2ForCausalLM"
],
"attention_dropout": 0.0,
"bos_token_id": 151643,
"eos_token_id": 151645,
"hidden_act": "silu",
"hidden_size": 3584,
"initializer_range": 0.02,
"intermediate_size": 18944,
"max_position_embeddings": 32768,
"max_window_layers": 28,
"model_type": "qwen2",
"num_attention_heads": 28,
"num_hidden_layers": 28,
"num_key_value_heads": 4,
"rms_norm_eps": 1e-06,
"rope_scaling": null,
"rope_theta": 1000000.0,
"sliding_window": 131072,
"tie_word_embeddings": false,
"torch_dtype": "bfloat16",
"transformers_version": "4.51.3",
"use_cache": true,
"use_sliding_window": false,
"vocab_size": 152064
}
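
The shape parameters pin down the architecture: 28 layers, 28 query heads over a 3584-wide hidden state (head dim 128), and 4 KV heads, i.e. grouped-query attention with 7 query heads per KV group. A small sketch deriving those numbers from the config:

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained(".")  # directory containing config.json

head_dim = cfg.hidden_size // cfg.num_attention_heads            # 3584 // 28 = 128
gqa_groups = cfg.num_attention_heads // cfg.num_key_value_heads  # 28 // 4 = 7
print(head_dim, gqa_groups, cfg.max_position_embeddings)         # 128 7 32768
```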

configuration.json Normal file (1 line)

@@ -0,0 +1 @@
{"task":"question-answering"}

generation_config.json Normal file (14 lines)

@@ -0,0 +1,14 @@
{
"bos_token_id": 151643,
"do_sample": true,
"eos_token_id": [
151645,
151643
],
"pad_token_id": 151643,
"repetition_penalty": 1.05,
"temperature": 0.7,
"top_k": 20,
"top_p": 0.8,
"transformers_version": "4.51.3"
}
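
These are the sampling defaults shipped with the checkpoint: nucleus sampling with top_p 0.8 and top_k 20 at temperature 0.7, plus a mild repetition penalty. They load automatically with the model and can be overridden per call; a sketch reusing model and inputs from the README example above (override values are illustrative):

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained(".")  # reads generation_config.json

out = model.generate(
    inputs,
    generation_config=gen_cfg,
    temperature=0.2,      # per-call override of the shipped 0.7 default
    max_new_tokens=128,
)
```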

merges.txt Normal file (151388 lines)

File diff suppressed because it is too large

model-00001-of-00009.safetensors Normal file (3 lines)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a0460b159f38f30d381318107ede70848cdd7b39e5c7e51f1f3a914e17b185db
size 1886423520

model-00002-of-00009.safetensors Normal file (3 lines)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8cc4d5e983688ffd2635dfbefe04b4479008db07c3f5d7218f7821f7c53fec42
size 1864467800

model-00003-of-00009.safetensors Normal file (3 lines)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6f577540bb96b7458323ff3597801e03b948d72685ae5e6c3da1cc10c60e4668
size 1864467800

model-00004-of-00009.safetensors Normal file (3 lines)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7a2f80512cf472d0250b1e9ff684f26bff54ff58720b316fd04809e2ac667cca
size 1864467824

model-00005-of-00009.safetensors Normal file (3 lines)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:707f372fca1b8ae08232489219b98fd50ae14304c474604a5e866e0eca711b82
size 1864467848

model-00006-of-00009.safetensors Normal file (3 lines)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:02c2b618288634fb3d3da3fda8583d43af1819ca7ccb9972456cac2cbe20290a
size 1864467848

model-00007-of-00009.safetensors Normal file (3 lines)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:280d38f97ebe60b0eccb5943b543e35584f9ba59161584e4fc72602906b670f0
size 1864467848

model-00008-of-00009.safetensors Normal file (3 lines)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3ea4d18cf6d8937a933be00ecd14c4f359b58535748b4027f1a0f7085021202a
size 1068046456

model-00009-of-00009.safetensors Normal file (3 lines)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:06006972c3be88e8a44fe21cfe2b0472b130780c781a741f8f90f1fe5ba3aae2
size 1089994880
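
The nine shard entries above are Git LFS pointers rather than the weights themselves: three text lines giving the spec version, a SHA-256 object id, and the payload size (the shards sum to roughly 15.2 GB, matching total_size in the index that follows). A small parser sketch for the pointer format:

```python
def parse_lfs_pointer(text: str) -> dict:
    """Parse a Git LFS pointer file into its key/value fields."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"version": fields["version"], "oid_algo": algo,
            "oid": digest, "size": int(fields["size"])}

ptr = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:3ea4d18cf6d8937a933be00ecd14c4f359b58535748b4027f1a0f7085021202a\n"
    "size 1068046456\n"
)
print(ptr["size"] / 2**30)  # shard size in GiB, about 0.99
```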

model.safetensors.index.json Normal file (346 lines)

@@ -0,0 +1,346 @@
{
"metadata": {
"total_size": 15231233024
},
"weight_map": {
"lm_head.weight": "model-00009-of-00009.safetensors",
"model.embed_tokens.weight": "model-00001-of-00009.safetensors",
"model.layers.0.input_layernorm.weight": "model-00001-of-00009.safetensors",
"model.layers.0.mlp.down_proj.weight": "model-00001-of-00009.safetensors",
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
"model.layers.0.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
"model.layers.0.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
"model.layers.0.self_attn.k_proj.bias": "model-00001-of-00009.safetensors",
"model.layers.0.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
"model.layers.0.self_attn.q_proj.bias": "model-00001-of-00009.safetensors",
"model.layers.0.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
"model.layers.0.self_attn.v_proj.bias": "model-00001-of-00009.safetensors",
"model.layers.0.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
"model.layers.1.input_layernorm.weight": "model-00002-of-00009.safetensors",
"model.layers.1.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.1.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
"model.layers.1.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
"model.layers.1.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
"model.layers.1.self_attn.k_proj.bias": "model-00001-of-00009.safetensors",
"model.layers.1.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
"model.layers.1.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
"model.layers.1.self_attn.q_proj.bias": "model-00001-of-00009.safetensors",
"model.layers.1.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
"model.layers.1.self_attn.v_proj.bias": "model-00001-of-00009.safetensors",
"model.layers.1.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
"model.layers.10.input_layernorm.weight": "model-00004-of-00009.safetensors",
"model.layers.10.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.10.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.10.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.10.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
"model.layers.10.self_attn.k_proj.bias": "model-00004-of-00009.safetensors",
"model.layers.10.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.10.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.10.self_attn.q_proj.bias": "model-00004-of-00009.safetensors",
"model.layers.10.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.10.self_attn.v_proj.bias": "model-00004-of-00009.safetensors",
"model.layers.10.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.11.input_layernorm.weight": "model-00004-of-00009.safetensors",
"model.layers.11.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.11.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.11.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.11.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
"model.layers.11.self_attn.k_proj.bias": "model-00004-of-00009.safetensors",
"model.layers.11.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.11.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.11.self_attn.q_proj.bias": "model-00004-of-00009.safetensors",
"model.layers.11.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.11.self_attn.v_proj.bias": "model-00004-of-00009.safetensors",
"model.layers.11.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.12.input_layernorm.weight": "model-00004-of-00009.safetensors",
"model.layers.12.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.12.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.12.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.12.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
"model.layers.12.self_attn.k_proj.bias": "model-00004-of-00009.safetensors",
"model.layers.12.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.12.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.12.self_attn.q_proj.bias": "model-00004-of-00009.safetensors",
"model.layers.12.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.12.self_attn.v_proj.bias": "model-00004-of-00009.safetensors",
"model.layers.12.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.13.input_layernorm.weight": "model-00005-of-00009.safetensors",
"model.layers.13.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.13.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.13.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.13.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
"model.layers.13.self_attn.k_proj.bias": "model-00004-of-00009.safetensors",
"model.layers.13.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.13.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.13.self_attn.q_proj.bias": "model-00004-of-00009.safetensors",
"model.layers.13.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.13.self_attn.v_proj.bias": "model-00004-of-00009.safetensors",
"model.layers.13.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.14.input_layernorm.weight": "model-00005-of-00009.safetensors",
"model.layers.14.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.14.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.14.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.14.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
"model.layers.14.self_attn.k_proj.bias": "model-00005-of-00009.safetensors",
"model.layers.14.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.14.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.14.self_attn.q_proj.bias": "model-00005-of-00009.safetensors",
"model.layers.14.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.14.self_attn.v_proj.bias": "model-00005-of-00009.safetensors",
"model.layers.14.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.15.input_layernorm.weight": "model-00005-of-00009.safetensors",
"model.layers.15.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.15.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.15.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.15.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
"model.layers.15.self_attn.k_proj.bias": "model-00005-of-00009.safetensors",
"model.layers.15.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.15.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.15.self_attn.q_proj.bias": "model-00005-of-00009.safetensors",
"model.layers.15.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.15.self_attn.v_proj.bias": "model-00005-of-00009.safetensors",
"model.layers.15.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.16.input_layernorm.weight": "model-00005-of-00009.safetensors",
"model.layers.16.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.16.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.16.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.16.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
"model.layers.16.self_attn.k_proj.bias": "model-00005-of-00009.safetensors",
"model.layers.16.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.16.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.16.self_attn.q_proj.bias": "model-00005-of-00009.safetensors",
"model.layers.16.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.16.self_attn.v_proj.bias": "model-00005-of-00009.safetensors",
"model.layers.16.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.17.input_layernorm.weight": "model-00006-of-00009.safetensors",
"model.layers.17.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.17.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.17.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.17.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
"model.layers.17.self_attn.k_proj.bias": "model-00005-of-00009.safetensors",
"model.layers.17.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.17.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.17.self_attn.q_proj.bias": "model-00005-of-00009.safetensors",
"model.layers.17.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.17.self_attn.v_proj.bias": "model-00005-of-00009.safetensors",
"model.layers.17.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
"model.layers.18.input_layernorm.weight": "model-00006-of-00009.safetensors",
"model.layers.18.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.18.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.18.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.18.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
"model.layers.18.self_attn.k_proj.bias": "model-00006-of-00009.safetensors",
"model.layers.18.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.18.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.18.self_attn.q_proj.bias": "model-00006-of-00009.safetensors",
"model.layers.18.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.18.self_attn.v_proj.bias": "model-00006-of-00009.safetensors",
"model.layers.18.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.19.input_layernorm.weight": "model-00006-of-00009.safetensors",
"model.layers.19.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.19.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.19.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.19.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
"model.layers.19.self_attn.k_proj.bias": "model-00006-of-00009.safetensors",
"model.layers.19.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.19.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.19.self_attn.q_proj.bias": "model-00006-of-00009.safetensors",
"model.layers.19.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.19.self_attn.v_proj.bias": "model-00006-of-00009.safetensors",
"model.layers.19.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.2.input_layernorm.weight": "model-00002-of-00009.safetensors",
"model.layers.2.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.2.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.2.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.2.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
"model.layers.2.self_attn.k_proj.bias": "model-00002-of-00009.safetensors",
"model.layers.2.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.2.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.2.self_attn.q_proj.bias": "model-00002-of-00009.safetensors",
"model.layers.2.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.2.self_attn.v_proj.bias": "model-00002-of-00009.safetensors",
"model.layers.2.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.20.input_layernorm.weight": "model-00006-of-00009.safetensors",
"model.layers.20.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.20.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.20.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.20.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
"model.layers.20.self_attn.k_proj.bias": "model-00006-of-00009.safetensors",
"model.layers.20.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.20.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.20.self_attn.q_proj.bias": "model-00006-of-00009.safetensors",
"model.layers.20.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.20.self_attn.v_proj.bias": "model-00006-of-00009.safetensors",
"model.layers.20.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.21.input_layernorm.weight": "model-00007-of-00009.safetensors",
"model.layers.21.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.21.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.21.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.21.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
"model.layers.21.self_attn.k_proj.bias": "model-00006-of-00009.safetensors",
"model.layers.21.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.21.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.21.self_attn.q_proj.bias": "model-00006-of-00009.safetensors",
"model.layers.21.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.21.self_attn.v_proj.bias": "model-00006-of-00009.safetensors",
"model.layers.21.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
"model.layers.22.input_layernorm.weight": "model-00007-of-00009.safetensors",
"model.layers.22.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.22.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.22.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.22.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
"model.layers.22.self_attn.k_proj.bias": "model-00007-of-00009.safetensors",
"model.layers.22.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.22.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.22.self_attn.q_proj.bias": "model-00007-of-00009.safetensors",
"model.layers.22.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.22.self_attn.v_proj.bias": "model-00007-of-00009.safetensors",
"model.layers.22.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.23.input_layernorm.weight": "model-00007-of-00009.safetensors",
"model.layers.23.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.23.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.23.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.23.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
"model.layers.23.self_attn.k_proj.bias": "model-00007-of-00009.safetensors",
"model.layers.23.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.23.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.23.self_attn.q_proj.bias": "model-00007-of-00009.safetensors",
"model.layers.23.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.23.self_attn.v_proj.bias": "model-00007-of-00009.safetensors",
"model.layers.23.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.24.input_layernorm.weight": "model-00007-of-00009.safetensors",
"model.layers.24.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.24.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.24.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.24.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
"model.layers.24.self_attn.k_proj.bias": "model-00007-of-00009.safetensors",
"model.layers.24.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.24.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.24.self_attn.q_proj.bias": "model-00007-of-00009.safetensors",
"model.layers.24.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.24.self_attn.v_proj.bias": "model-00007-of-00009.safetensors",
"model.layers.24.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.25.input_layernorm.weight": "model-00008-of-00009.safetensors",
"model.layers.25.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
"model.layers.25.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.25.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.25.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
"model.layers.25.self_attn.k_proj.bias": "model-00007-of-00009.safetensors",
"model.layers.25.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.25.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.25.self_attn.q_proj.bias": "model-00007-of-00009.safetensors",
"model.layers.25.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.25.self_attn.v_proj.bias": "model-00007-of-00009.safetensors",
"model.layers.25.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
"model.layers.26.input_layernorm.weight": "model-00008-of-00009.safetensors",
"model.layers.26.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
"model.layers.26.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
"model.layers.26.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
"model.layers.26.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
"model.layers.26.self_attn.k_proj.bias": "model-00008-of-00009.safetensors",
"model.layers.26.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
"model.layers.26.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
"model.layers.26.self_attn.q_proj.bias": "model-00008-of-00009.safetensors",
"model.layers.26.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
"model.layers.26.self_attn.v_proj.bias": "model-00008-of-00009.safetensors",
"model.layers.26.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
"model.layers.27.input_layernorm.weight": "model-00008-of-00009.safetensors",
"model.layers.27.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
"model.layers.27.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
"model.layers.27.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
"model.layers.27.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
"model.layers.27.self_attn.k_proj.bias": "model-00008-of-00009.safetensors",
"model.layers.27.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
"model.layers.27.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
"model.layers.27.self_attn.q_proj.bias": "model-00008-of-00009.safetensors",
"model.layers.27.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
"model.layers.27.self_attn.v_proj.bias": "model-00008-of-00009.safetensors",
"model.layers.27.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
"model.layers.3.input_layernorm.weight": "model-00002-of-00009.safetensors",
"model.layers.3.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.3.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.3.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.3.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
"model.layers.3.self_attn.k_proj.bias": "model-00002-of-00009.safetensors",
"model.layers.3.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.3.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.3.self_attn.q_proj.bias": "model-00002-of-00009.safetensors",
"model.layers.3.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.3.self_attn.v_proj.bias": "model-00002-of-00009.safetensors",
"model.layers.3.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.4.input_layernorm.weight": "model-00002-of-00009.safetensors",
"model.layers.4.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.4.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.4.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.4.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
"model.layers.4.self_attn.k_proj.bias": "model-00002-of-00009.safetensors",
"model.layers.4.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.4.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.4.self_attn.q_proj.bias": "model-00002-of-00009.safetensors",
"model.layers.4.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.4.self_attn.v_proj.bias": "model-00002-of-00009.safetensors",
"model.layers.4.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.5.input_layernorm.weight": "model-00003-of-00009.safetensors",
"model.layers.5.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.5.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.5.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.5.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
"model.layers.5.self_attn.k_proj.bias": "model-00002-of-00009.safetensors",
"model.layers.5.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.5.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.5.self_attn.q_proj.bias": "model-00002-of-00009.safetensors",
"model.layers.5.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.5.self_attn.v_proj.bias": "model-00002-of-00009.safetensors",
"model.layers.5.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
"model.layers.6.input_layernorm.weight": "model-00003-of-00009.safetensors",
"model.layers.6.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.6.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.6.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.6.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
"model.layers.6.self_attn.k_proj.bias": "model-00003-of-00009.safetensors",
"model.layers.6.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.6.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.6.self_attn.q_proj.bias": "model-00003-of-00009.safetensors",
"model.layers.6.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.6.self_attn.v_proj.bias": "model-00003-of-00009.safetensors",
"model.layers.6.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.7.input_layernorm.weight": "model-00003-of-00009.safetensors",
"model.layers.7.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.7.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.7.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.7.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
"model.layers.7.self_attn.k_proj.bias": "model-00003-of-00009.safetensors",
"model.layers.7.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.7.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.7.self_attn.q_proj.bias": "model-00003-of-00009.safetensors",
"model.layers.7.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.7.self_attn.v_proj.bias": "model-00003-of-00009.safetensors",
"model.layers.7.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.8.input_layernorm.weight": "model-00003-of-00009.safetensors",
"model.layers.8.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.8.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.8.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.8.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
"model.layers.8.self_attn.k_proj.bias": "model-00003-of-00009.safetensors",
"model.layers.8.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.8.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.8.self_attn.q_proj.bias": "model-00003-of-00009.safetensors",
"model.layers.8.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.8.self_attn.v_proj.bias": "model-00003-of-00009.safetensors",
"model.layers.8.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.9.input_layernorm.weight": "model-00004-of-00009.safetensors",
"model.layers.9.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
"model.layers.9.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.9.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.9.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
"model.layers.9.self_attn.k_proj.bias": "model-00003-of-00009.safetensors",
"model.layers.9.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.9.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.9.self_attn.q_proj.bias": "model-00003-of-00009.safetensors",
"model.layers.9.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
"model.layers.9.self_attn.v_proj.bias": "model-00003-of-00009.safetensors",
"model.layers.9.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
"model.norm.weight": "model-00008-of-00009.safetensors"
}
}
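
The weight map lets a loader open only the shard that holds a given tensor instead of reading all nine files. A sketch fetching one tensor directly with the safetensors library, assuming the LFS payloads have been pulled:

```python
import json

from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.embed_tokens.weight"
shard = index["weight_map"][name]  # "model-00001-of-00009.safetensors"

with safe_open(shard, framework="pt") as f:
    embed = f.get_tensor(name)
print(embed.shape)  # torch.Size([152064, 3584]) per config.json
```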

special_tokens_map.json Normal file (31 lines)

@@ -0,0 +1,31 @@
{
"additional_special_tokens": [
"<|im_start|>",
"<|im_end|>",
"<|object_ref_start|>",
"<|object_ref_end|>",
"<|box_start|>",
"<|box_end|>",
"<|quad_start|>",
"<|quad_end|>",
"<|vision_start|>",
"<|vision_end|>",
"<|vision_pad|>",
"<|image_pad|>",
"<|video_pad|>"
],
"eos_token": {
"content": "<|im_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"pad_token": {
"content": "<|endoftext|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
}
}
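
Note that the pad token (<|endoftext|>, id 151643) deliberately differs from the eos token (<|im_end|>, id 151645), so padding during batched fine-tuning cannot be confused with a real end-of-turn. A quick check, reusing the tokenizer loaded in the earlier sketches:

```python
assert tokenizer.eos_token == "<|im_end|>"
assert tokenizer.pad_token == "<|endoftext|>"
assert tokenizer.pad_token_id != tokenizer.eos_token_id  # padding never masks a real stop
```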

tokenizer.json Normal file (757444 lines)

File diff suppressed because it is too large

tokenizer_config.json Normal file (209 lines)

@@ -0,0 +1,209 @@
{
"add_bos_token": false,
"add_prefix_space": false,
"added_tokens_decoder": {
"151643": {
"content": "<|endoftext|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151644": {
"content": "<|im_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151645": {
"content": "<|im_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151646": {
"content": "<|object_ref_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151647": {
"content": "<|object_ref_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151648": {
"content": "<|box_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151649": {
"content": "<|box_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151650": {
"content": "<|quad_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151651": {
"content": "<|quad_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151652": {
"content": "<|vision_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151653": {
"content": "<|vision_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151654": {
"content": "<|vision_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151655": {
"content": "<|image_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151656": {
"content": "<|video_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151657": {
"content": "<tool_call>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151658": {
"content": "</tool_call>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151659": {
"content": "<|fim_prefix|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151660": {
"content": "<|fim_middle|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151661": {
"content": "<|fim_suffix|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151662": {
"content": "<|fim_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151663": {
"content": "<|repo_name|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151664": {
"content": "<|file_sep|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
}
},
"additional_special_tokens": [
"<|im_start|>",
"<|im_end|>",
"<|object_ref_start|>",
"<|object_ref_end|>",
"<|box_start|>",
"<|box_end|>",
"<|quad_start|>",
"<|quad_end|>",
"<|vision_start|>",
"<|vision_end|>",
"<|vision_pad|>",
"<|image_pad|>",
"<|video_pad|>"
],
"bos_token": null,
"chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
"clean_up_tokenization_spaces": false,
"eos_token": "<|im_end|>",
"errors": "replace",
"extra_special_tokens": {},
"model_max_length": 131072,
"pad_token": "<|endoftext|>",
"padding_side": "left",
"split_special_tokens": false,
"tokenizer_class": "Qwen2Tokenizer",
"unk_token": null
}
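
Beyond plain chat, the chat_template above injects tool schemas into the system turn and renders <tool_call>/<tool_response> turns. A hedged sketch of rendering a tool-calling prompt with apply_chat_template; the weather tool is invented for illustration:

```python
weather_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string", "description": "City name."}},
            "required": ["city"],
        },
    },
}

prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "What's the weather in Berlin?"}],
    tools=[weather_tool],
    add_generation_prompt=True,
    tokenize=False,
)
print(prompt)  # the system turn now carries a <tools>...</tools> block with this schema
```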

vocab.json Normal file (1 line)

File diff suppressed because one or more lines are too long