Initialize project; model provided by the ModelHub XC community

Model: AMAImedia/Nemotron-Orchestrator-8B-Qwen3-BF16-NOESIS
Source: Original Platform
ModelHub XC
2026-04-20 09:19:59 +08:00
commit 0c4e1c7daf
16 changed files with 1204 additions and 0 deletions

36
.gitattributes vendored Normal file

@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text

203
LICENSE Normal file

@@ -0,0 +1,203 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2024 Alibaba Cloud
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

145
README.md Normal file

@@ -0,0 +1,145 @@
---
license: other
license_name: nvidia-open-model-license
license_link: LICENSE
language:
- en
library_name: transformers
tags:
- bf16
- orchestration
- tool-calling
- noesis
- dhcf-fno
- qwen3
base_model: nvidia/Nemotron-Orchestrator-8B
quantized_by: AMAImedia
pipeline_tag: text-generation
---
# Nemotron-Orchestrator-8B-Qwen3-BF16-NOESIS
**BF16 reference checkpoint of [nvidia/Nemotron-Orchestrator-8B](https://huggingface.co/nvidia/Nemotron-Orchestrator-8B),
cast from the original FP32 release with negligible quality impact for inference.**
Released as part of the **NOESIS Professional Multilingual Dubbing Automation Platform**
(framework: DHCF-FNO — Deterministic Hybrid Control Framework for Frozen Neural Operators).
- **Founder:** Ilia Bolotnikov
- **Organization:** [AMAImedia.com](https://www.amaimedia.com)
- **X (Twitter):** [@AMAImediacom](https://x.com/AMAImediacom)
- **LinkedIn:** [Ilia Bolotnikov](https://www.linkedin.com/in/ilia-bolotnikov)
- **Telegram:** [@djbionicl](https://t.me/djbionicl)
- **NOESIS version:** v14.6
- **Release date:** 2026-04
---
## ⚠️ License notice
This model inherits the **NVIDIA Open Model License** from the upstream
`nvidia/Nemotron-Orchestrator-8B`. The base model is designated by NVIDIA as
**"for research and development only"**.
This BF16 derivative is published as a bandwidth-friendly reference checkpoint
for the broader research and development community. **Users are responsible
for compliance with NVIDIA's license terms** — see the `LICENSE` file in
this repository for the full text.
---
## Why this BF16 release exists
The original NVIDIA release ships in **FP32 (~32 GB on disk)**. Most modern
inference and quantization tooling (Hugging Face Transformers, vLLM, SGLang,
AutoAWQ, AutoGPTQ, llama.cpp BF16 conversion) casts the weights to BF16 at
load time anyway. Publishing a pre-cast BF16 checkpoint:
- Halves download bandwidth (16 GB vs 32 GB)
- Halves disk footprint
- Skips a slow load-time cast for users
- Provides a clean BF16 baseline for downstream quantization recipes
The cast is performed via `torch.Tensor.to(dtype=torch.bfloat16)` with
IEEE 754 round-to-nearest-even (PyTorch default). BF16 keeps the same 8-bit
exponent range as FP32 and retains 7 mantissa bits, so the rounding has
**negligible impact on inference-time use** of weight tensors.
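For reference, the cast amounts to a per-tensor dtype conversion over the safetensors shards. A minimal sketch, assuming the FP32 shards are available locally (paths are illustrative; this is not the exact conversion script):
```python
import torch
from safetensors.torch import load_file, save_file
# Illustrative shard paths, not the actual conversion pipeline.
src = "fp32/model-00001-of-00005.safetensors"
dst = "bf16/model-00001-of-00005.safetensors"
tensors = load_file(src)
# .to(torch.bfloat16) rounds the 23-bit FP32 mantissa to 7 bits
# (round-to-nearest-even) and carries the 8-bit exponent over unchanged.
tensors = {name: t.to(torch.bfloat16) for name, t in tensors.items()}
save_file(tensors, dst)
```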
---
## Model summary
| Property | Value |
| --- | --- |
| Base model | nvidia/Nemotron-Orchestrator-8B |
| Underlying architecture | Qwen3-8B (decoder-only transformer, **dense, NOT MoE**) |
| Source precision | FP32 |
| This release precision | BF16 |
| Vocab size | 151936 |
| Language | English (per base model) |
| Disk footprint | ~16 GB |
| Inference VRAM | ~17 GB BF16 (full-resident on 24 GB+ GPU) |
For low-VRAM (6-12 GB) inference, see the AWQ INT4 sibling release:
[amaimedia/Nemotron-Orchestrator-8B-Qwen3-AWQ-INT4-NOESIS](https://huggingface.co/amaimedia/Nemotron-Orchestrator-8B-Qwen3-AWQ-INT4-NOESIS).
---
## How to use
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
model_id = "amaimedia/Nemotron-Orchestrator-8B-Qwen3-BF16-NOESIS"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
model_id,
torch_dtype=torch.bfloat16,
device_map="auto",
)
prompt = "Plan a multi-step task: find recent AWQ papers, summarize the top three."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=256, do_sample=False)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```
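Since the model is trained for tool orchestration, the bundled chat template also accepts tool schemas through the standard `apply_chat_template` API. A minimal sketch continuing from the snippet above (the `get_weather` schema is a made-up placeholder, not part of this release):
```python
# Hypothetical tool schema, for illustration only.
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]
messages = [{"role": "user", "content": "What's the weather in Hanoi?"}]
# The template lists the schemas inside <tools></tools> tags and instructs
# the model to wrap calls in <tool_call></tool_call>.
text = tokenizer.apply_chat_template(
    messages, tools=tools, tokenize=False, add_generation_prompt=True
)
inputs = tokenizer(text, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(out[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```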
---
## NOESIS context
This BF16 checkpoint is the source artifact for the AWQ INT4 quantization
used as the **English orchestration teacher** for NOESIS Specialist
**M9-ORCH-4B** during knowledge distillation.
NOESIS is a 9-specialist dubbing automation platform — see the NOESIS
collection for the full specialist family.
---
## Acknowledgements & citation
Base model: [nvidia/Nemotron-Orchestrator-8B](https://huggingface.co/nvidia/Nemotron-Orchestrator-8B) from the ToolOrchestra project by NVIDIA & the University of Hong Kong.
```bibtex
@misc{toolorchestra,
title = {ToolOrchestra: Elevating Intelligence via Efficient Model and Tool Orchestration},
author = {Hongjin Su and Shizhe Diao and Ximing Lu and others},
year = {2025},
eprint = {2511.21689},
archivePrefix = {arXiv}
}
```
NOESIS:
```bibtex
@misc{noesis_v14,
title = {NOESIS v14.6: DHCF-FNO Multilingual Dubbing Platform},
author = {Bolotnikov, Ilia},
year = {2026},
publisher = {AMAImedia},
url = {https://amaimedia.com}
}
```

187
README_NEMOTRON_BF16.md Normal file

@@ -0,0 +1,187 @@
---
license: other
license_name: nvidia-open-model-license
license_link: LICENSE
language:
- en
library_name: transformers
tags:
- bf16
- orchestration
- tool-calling
- noesis
- dhcf-fno
- qwen3
base_model: nvidia/Nemotron-Orchestrator-8B
quantized_by: AMAImedia
pipeline_tag: text-generation
---
# Nemotron-Orchestrator-8B-Qwen3-BF16-NOESIS
**BF16 reference checkpoint of [nvidia/Nemotron-Orchestrator-8B](https://huggingface.co/nvidia/Nemotron-Orchestrator-8B), cast from the original FP32 release with negligible quality impact for inference.**
This release halves the download bandwidth and disk footprint of the original model (from ~32 GB FP32 down to ~16 GB BF16) and serves as the source artifact for the sibling AWQ INT4 quantization.
Released as part of the **NOESIS Professional Multilingual Dubbing Automation Platform** (framework: DHCF-FNO — Deterministic Hybrid Control Framework for Frozen Neural Operators).
- **Founder:** Ilia Bolotnikov · **Telegram:** [@djbionicl](https://t.me/djbionicl)
- **Organization:** [AMAImedia.com](https://amaimedia.com)
- **NOESIS version:** v14.1 · **Release date:** 2026-04
- **License:** NVIDIA Open Model License (research and development only — see `LICENSE`)
---
## NOESIS context
NOESIS is a 9-specialist multilingual video dubbing platform (full dubbing in 30 languages, ASR in 150+). This BF16 checkpoint is the source artifact for the AWQ INT4 quantization used as the **English orchestration teacher** for NOESIS Specialist **M9-ORCH-4B** during knowledge distillation.
NOESIS specialists overview:
| ID | Role | Size |
| --- | --- | --- |
| M1 | ASR (150+ langs) | 10B/3B |
| M2 | Dubbing LM (30 langs full) | 10B/3B |
| M3 | TTS + voice cloning | 10B/3B |
| M4 | Chat + creative writing | 10B/3B |
| M5 | Code + math | 10B/3B |
| M6 | Deep research (1M ctx) | 10B/3B |
| M7 | Prompt engineering | 4B/0.8B |
| M8 | Quality control (PRM) | 4B/0.8B |
| **M9** | **Orchestrator + routing** | **4B/0.8B** |
---
## Conversion details
| Property | Value |
| --- | --- |
| Source precision | FP32 (~32 GB) |
| This release precision | BF16 (~15.3 GiB) |
| Conversion method | `torch.Tensor.to(dtype=torch.bfloat16)` |
| Rounding | IEEE 754 round-to-nearest-even (PyTorch default) |
| Quality impact | Negligible for inference (BF16 keeps FP32's 8-bit exponent; only the mantissa is rounded) |
For low-VRAM (6-12 GB) inference, see the AWQ INT4 sibling release:
[amaimedia/Nemotron-Orchestrator-8B-Qwen3-AWQ-INT4-NOESIS](https://huggingface.co/amaimedia/Nemotron-Orchestrator-8B-Qwen3-AWQ-INT4-NOESIS).
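The negligible-impact claim can be sanity-checked directly: with round-to-nearest-even and 8 significant bits, the relative rounding error of the cast is bounded by 2⁻⁸ (about 0.4%). A small self-contained check, assuming only PyTorch:
```python
import torch
x = torch.randn(1024, dtype=torch.float32)
y = x.to(torch.bfloat16)  # same cast as used for the release weights
# BF16 keeps the 8-bit exponent, so only the mantissa is rounded;
# round-to-nearest with 8 significant bits gives relative error <= 2**-8.
rel_err = ((y.to(torch.float32) - x).abs()
           / x.abs().clamp_min(torch.finfo(torch.float32).tiny)).max()
assert rel_err <= 2.0 ** -8
print(f"max relative rounding error: {rel_err:.2e}")
```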
---
## How to use
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
model_id = "amaimedia/Nemotron-Orchestrator-8B-Qwen3-BF16-NOESIS"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
model_id,
torch_dtype=torch.bfloat16,
device_map="auto",
)
prompt = "Plan a multi-step task: find recent AWQ papers, summarize the top three."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=256, do_sample=False)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```
---
## ⚠️ License notice
This model inherits the **NVIDIA Open Model License** from the upstream `nvidia/Nemotron-Orchestrator-8B`. The base model is designated by NVIDIA as **"for research and development only"**.
This BF16 derivative is published as a bandwidth-friendly reference checkpoint for the broader research and development community. Users are responsible for compliance with NVIDIA's license terms — see the `LICENSE` file in this repository for the full text.
---
# Original Model Card
The following is the original model card from [nvidia/Nemotron-Orchestrator-8B](https://huggingface.co/nvidia/Nemotron-Orchestrator-8B), reproduced here for convenience. All credit for the underlying model goes to NVIDIA & the University of Hong Kong.
---
## ToolOrchestra: Elevating Intelligence via Efficient Model and Tool Orchestration
[![Paper](https://img.shields.io/badge/ArXiv-Paper-brown)](https://arxiv.org/abs/2511.21689)
[![Code](https://img.shields.io/badge/GitHub-Link-orange)](https://github.com/NVlabs/ToolOrchestra/)
[![Model](https://img.shields.io/badge/HuggingFace-Model-green)](https://huggingface.co/nvidia/Nemotron-Orchestrator-8B)
[![Data](https://img.shields.io/badge/HuggingFace-Data-blue)](https://huggingface.co/datasets/nvidia/ToolScale)
[![Website](https://img.shields.io/badge/Web-Page-purple)](https://research.nvidia.com/labs/lpr/ToolOrchestra/)
### Description
Orchestrator-8B is a state-of-the-art 8B parameter orchestration model designed to solve complex, multi-turn agentic tasks by coordinating a diverse set of expert models and tools.
On the Humanity's Last Exam (HLE) benchmark, Orchestrator-8B achieves a score of 37.1%, outperforming GPT-5 (35.1%) while being approximately 2.5x more efficient.
This model is for research and development only.
### Key Features
* **Intelligent Orchestration:** Capable of managing heterogeneous toolsets including basic tools (search, code execution) and other LLMs (specialized and generalist).
* **Multi-Objective RL Training:** Trained via Group Relative Policy Optimization (GRPO) with a novel reward function that optimizes for accuracy, latency/cost, and adherence to user preferences.
* **Efficiency:** Delivers higher accuracy at significantly lower computational cost compared to monolithic frontier models.
* **Robust Generalization:** Demonstrated ability to generalize to unseen tools and pricing configurations.
### Benchmark
On Humanity's Last Exam, Orchestrator-8B achieves 37.1%, surpassing GPT-5 (35.1%) at roughly 30% of the monetary cost while running 2.5x faster. On FRAMES and τ²-Bench, Orchestrator-8B consistently outperforms strong monolithic systems, demonstrating versatile reasoning and robust tool orchestration.
Orchestrator-8B consistently outperforms GPT-5, Claude Opus 4.1 and Qwen3-235B-A22B on HLE with substantially lower cost.
### Model Details
* **Developed by:** NVIDIA & University of Hong Kong
* **Model Type:** Decoder-only Transformer (dense, not MoE)
* **Base Model:** [Qwen3-8B](https://huggingface.co/Qwen/Qwen3-8B)
* **Parameters:** 8B
* **Language(s):** English
* **License:** NVIDIA License
### Model Version
1.0
### Training Dataset
| Dataset | Link |
| --- | --- |
| GeneralThought-430K | [Link](https://huggingface.co/datasets/natolambert/GeneralThought-430K-filtered) |
| ToolScale | [Link](https://huggingface.co/datasets/nvidia/ToolScale) |
### Ethical Considerations
NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse.
Please report model quality, risk, security vulnerabilities or NVIDIA AI Concerns [here](https://app.intigriti.com/programs/nvidia/nvidiavdp/detail).
### Citation
```bibtex
@misc{toolorchestra,
title = {ToolOrchestra: Elevating Intelligence via Efficient Model and Tool Orchestration},
author = {Hongjin Su and Shizhe Diao and Ximing Lu and Mingjie Liu and Jiacheng Xu and Xin Dong and Yonggan Fu and Peter Belcak and Hanrong Ye and Hongxu Yin and Yi Dong and Evelina Bakhturina and Tao Yu and Yejin Choi and Jan Kautz and Pavlo Molchanov},
year = {2025},
eprint = {2511.21689},
archivePrefix = {arXiv},
primaryClass = {cs.CL},
url = {https://arxiv.org/abs/2511.21689}
}
```
---
## NOESIS citation
```bibtex
@misc{noesis_v14,
title = {NOESIS v14.1: DHCF-FNO Multilingual Dubbing Platform},
author = {Bolotnikov, Ilia},
year = {2026},
publisher = {AMAImedia},
url = {https://amaimedia.com}
}
```

89
chat_template.jinja Normal file

@@ -0,0 +1,89 @@
{%- if tools %}
{{- '<|im_start|>system\n' }}
{%- if messages[0].role == 'system' %}
{{- messages[0].content + '\n\n' }}
{%- endif %}
{{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
{%- for tool in tools %}
{{- "\n" }}
{{- tool | tojson }}
{%- endfor %}
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
{%- if messages[0].role == 'system' %}
{{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
{%- for message in messages[::-1] %}
{%- set index = (messages|length - 1) - loop.index0 %}
{%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
{%- set ns.multi_step_tool = false %}
{%- set ns.last_query_index = index %}
{%- endif %}
{%- endfor %}
{%- for message in messages %}
{%- if message.content is string %}
{%- set content = message.content %}
{%- else %}
{%- set content = '' %}
{%- endif %}
{%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
{{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
{%- elif message.role == "assistant" %}
{%- set reasoning_content = '' %}
{%- if message.reasoning_content is string %}
{%- set reasoning_content = message.reasoning_content %}
{%- else %}
{%- if '</think>' in content %}
{%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
{%- set content = content.split('</think>')[-1].lstrip('\n') %}
{%- endif %}
{%- endif %}
{%- if loop.index0 > ns.last_query_index %}
{%- if loop.last or (not loop.last and reasoning_content) %}
{{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- if message.tool_calls %}
{%- for tool_call in message.tool_calls %}
{%- if (loop.first and content) or (not loop.first) %}
{{- '\n' }}
{%- endif %}
{%- if tool_call.function %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{%- if tool_call.arguments is string %}
{{- tool_call.arguments }}
{%- else %}
{{- tool_call.arguments | tojson }}
{%- endif %}
{{- '}\n</tool_call>' }}
{%- endfor %}
{%- endif %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "tool" %}
{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|im_start|>user' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- content }}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- if enable_thinking is defined and enable_thinking is false %}
{{- '<think>\n\n</think>\n\n' }}
{%- endif %}
{%- endif %}

71
config.json Normal file

@@ -0,0 +1,71 @@
{
"architectures": [
"Qwen3ForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": null,
"dtype": "bfloat16",
"eos_token_id": 151645,
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 4096,
"initializer_range": 0.02,
"intermediate_size": 12288,
"layer_types": [
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention"
],
"max_position_embeddings": 40960,
"max_window_layers": 36,
"model_type": "qwen3",
"num_attention_heads": 32,
"num_hidden_layers": 36,
"num_key_value_heads": 8,
"pad_token_id": 151643,
"rms_norm_eps": 1e-06,
"rope_parameters": {
"rope_theta": 1000000,
"rope_type": "default"
},
"sliding_window": null,
"tie_word_embeddings": false,
"transformers_version": "5.6.0.dev0",
"use_cache": true,
"use_sliding_window": false,
"vocab_size": 151936
}

13
generation_config.json Normal file

@@ -0,0 +1,13 @@
{
"bos_token_id": 151643,
"do_sample": true,
"eos_token_id": [
151645,
151643
],
"pad_token_id": 151643,
"temperature": 0.6,
"top_k": 20,
"top_p": 0.95,
"transformers_version": "4.51.3"
}

3
model-00001-of-00005.safetensors Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d06b12dd140c4e473963ee45494f58e94256c0ee40ca6368b8790ec738594721
size 3990952896

3
model-00002-of-00005.safetensors Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:24ef581c3752aa9aa09028237513170d3c3d3f05e5110f86b51e63b97a3be35b
size 3900892880

3
model-00003-of-00005.safetensors Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:316f3e04a7a3566af5bcfb98128755ce2db9833bdab4eab19b415b440c743dac
size 3959604792

3
model-00004-of-00005.safetensors Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7ca6f8d447764a0399505429be323ff57f56f7c11f93347eb219da73b7e65d2c
size 3959604784

3
model-00005-of-00005.safetensors Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8b9dc168fa72fc43248c133b3dd7b3074f37712c2ee68b96dbbd808fcf802a57
size 570461432

407
model.safetensors.index.json Normal file

@@ -0,0 +1,407 @@
{
"metadata": {
"total_parameters": 8190735360,
"total_size": 16381470720
},
"weight_map": {
"lm_head.weight": "model-00001-of-00005.safetensors",
"model.embed_tokens.weight": "model-00001-of-00005.safetensors",
"model.layers.0.input_layernorm.weight": "model-00001-of-00005.safetensors",
"model.layers.0.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.0.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.0.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
"model.layers.0.self_attn.k_norm.weight": "model-00001-of-00005.safetensors",
"model.layers.0.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.0.self_attn.q_norm.weight": "model-00001-of-00005.safetensors",
"model.layers.0.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.0.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.1.input_layernorm.weight": "model-00001-of-00005.safetensors",
"model.layers.1.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.1.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.1.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.1.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
"model.layers.1.self_attn.k_norm.weight": "model-00001-of-00005.safetensors",
"model.layers.1.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.1.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.1.self_attn.q_norm.weight": "model-00001-of-00005.safetensors",
"model.layers.1.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.1.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.10.input_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.10.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.10.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.10.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.10.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.10.self_attn.k_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.10.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.10.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.10.self_attn.q_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.10.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.10.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.11.input_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.11.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.11.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.11.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.11.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.11.self_attn.k_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.11.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.11.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.11.self_attn.q_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.11.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.11.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.12.input_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.12.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.12.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.12.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.12.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.12.self_attn.k_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.12.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.12.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.12.self_attn.q_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.12.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.12.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.13.input_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.13.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.13.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.13.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.13.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.13.self_attn.k_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.13.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.13.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.13.self_attn.q_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.13.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.13.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.14.input_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.14.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.14.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.14.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.14.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.14.self_attn.k_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.14.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.14.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.14.self_attn.q_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.14.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.14.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.15.input_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.15.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.15.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.15.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.15.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.15.self_attn.k_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.15.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.15.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.15.self_attn.q_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.15.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.15.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.16.input_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.16.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.16.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.16.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.16.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.16.self_attn.k_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.16.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.16.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.16.self_attn.q_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.16.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.16.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.17.input_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.17.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.17.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.17.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.17.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.17.self_attn.k_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.17.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.17.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.17.self_attn.q_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.17.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.17.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.18.input_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.18.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.18.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.18.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.18.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.18.self_attn.k_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.18.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.18.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.18.self_attn.q_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.18.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.18.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.19.input_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.19.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.19.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.19.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.19.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.19.self_attn.k_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.19.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.19.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.19.self_attn.q_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.19.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.19.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.2.input_layernorm.weight": "model-00001-of-00005.safetensors",
"model.layers.2.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.2.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.2.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
"model.layers.2.self_attn.k_norm.weight": "model-00001-of-00005.safetensors",
"model.layers.2.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.2.self_attn.q_norm.weight": "model-00001-of-00005.safetensors",
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.2.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.20.input_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.20.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.20.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.20.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.20.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.20.self_attn.k_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.20.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.20.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.20.self_attn.q_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.20.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.20.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.21.input_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.21.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.21.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.21.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.21.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.21.self_attn.k_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.21.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.21.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.21.self_attn.q_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.21.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.21.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.22.input_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.22.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.22.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.22.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.22.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.22.self_attn.k_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.22.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.22.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.22.self_attn.q_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.22.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.22.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.23.input_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.23.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.23.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.23.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.23.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.23.self_attn.k_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.23.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.23.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.23.self_attn.q_norm.weight": "model-00003-of-00005.safetensors",
"model.layers.23.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.23.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.24.input_layernorm.weight": "model-00003-of-00005.safetensors",
"model.layers.24.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
"model.layers.24.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.24.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.24.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.24.self_attn.k_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.24.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.24.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.24.self_attn.q_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.24.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.24.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.25.input_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.25.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.25.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.25.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.25.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.25.self_attn.k_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.25.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.25.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.25.self_attn.q_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.25.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.25.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.26.input_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.26.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.26.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.26.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.26.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.26.self_attn.k_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.26.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.26.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.26.self_attn.q_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.26.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.26.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.27.input_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.27.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.27.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.27.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.27.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.27.self_attn.k_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.27.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.27.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.27.self_attn.q_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.27.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.27.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.28.input_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.28.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.28.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.28.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.28.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.28.self_attn.k_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.28.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.28.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.28.self_attn.q_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.28.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.28.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.29.input_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.29.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.29.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.29.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.29.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.29.self_attn.k_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.29.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.29.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.29.self_attn.q_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.29.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.29.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.3.input_layernorm.weight": "model-00001-of-00005.safetensors",
"model.layers.3.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.3.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.3.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.3.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
"model.layers.3.self_attn.k_norm.weight": "model-00001-of-00005.safetensors",
"model.layers.3.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
"model.layers.3.self_attn.q_norm.weight": "model-00001-of-00005.safetensors",
"model.layers.3.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.3.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.30.input_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.30.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.30.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.30.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.30.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.30.self_attn.k_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.30.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.30.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.30.self_attn.q_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.30.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.30.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.31.input_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.31.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.31.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.31.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.31.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.31.self_attn.k_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.31.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.31.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.31.self_attn.q_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.31.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.31.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.32.input_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.32.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.32.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.32.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.32.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.32.self_attn.k_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.32.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.32.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.32.self_attn.q_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.32.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.32.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.33.input_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.33.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.33.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.33.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.33.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.33.self_attn.k_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.33.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.33.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.33.self_attn.q_norm.weight": "model-00004-of-00005.safetensors",
"model.layers.33.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.33.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.34.input_layernorm.weight": "model-00004-of-00005.safetensors",
"model.layers.34.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.34.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
"model.layers.34.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
"model.layers.34.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
"model.layers.34.self_attn.k_norm.weight": "model-00005-of-00005.safetensors",
"model.layers.34.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
"model.layers.34.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
"model.layers.34.self_attn.q_norm.weight": "model-00005-of-00005.safetensors",
"model.layers.34.self_attn.q_proj.weight": "model-00005-of-00005.safetensors",
"model.layers.34.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
"model.layers.35.input_layernorm.weight": "model-00005-of-00005.safetensors",
"model.layers.35.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
"model.layers.35.mlp.gate_proj.weight": "model-00005-of-00005.safetensors",
"model.layers.35.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
"model.layers.35.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
"model.layers.35.self_attn.k_norm.weight": "model-00005-of-00005.safetensors",
"model.layers.35.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
"model.layers.35.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
"model.layers.35.self_attn.q_norm.weight": "model-00005-of-00005.safetensors",
"model.layers.35.self_attn.q_proj.weight": "model-00005-of-00005.safetensors",
"model.layers.35.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
"model.layers.4.input_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.4.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.4.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.4.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.4.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.4.self_attn.k_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.4.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.4.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.4.self_attn.q_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.4.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.4.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.5.input_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.5.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.5.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.5.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.5.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.5.self_attn.k_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.5.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.5.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.5.self_attn.q_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.5.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.5.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.6.input_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.6.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.6.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.6.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.6.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.6.self_attn.k_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.6.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.6.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.6.self_attn.q_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.6.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.6.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.7.input_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.7.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.7.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.7.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.7.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.7.self_attn.k_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.7.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.7.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.7.self_attn.q_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.7.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.7.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.8.input_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.8.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.8.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.8.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.8.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.8.self_attn.k_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.8.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.8.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.8.self_attn.q_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.8.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.8.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.9.input_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.9.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.9.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.9.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.9.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
"model.layers.9.self_attn.k_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.9.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.9.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.9.self_attn.q_norm.weight": "model-00002-of-00005.safetensors",
"model.layers.9.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
"model.layers.9.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
"model.norm.weight": "model-00005-of-00005.safetensors"
}
}

21
noesis_provenance.json Normal file

@@ -0,0 +1,21 @@
{
"noesis_version": "v14.1",
"framework": "DHCF-FNO",
"founder": "Ilia Bolotnikov",
"organization": "AMAImedia.com",
"source_model": "nvidia/Nemotron-Orchestrator-8B",
"source_format": "FP32 safetensors",
"source_license": "NVIDIA Open Model License (research and development only)",
"base_model": "Qwen/Qwen3-8B",
"architecture": "Qwen3ForCausalLM (dense decoder-only, NO MoE)",
"vocab_size": 151936,
"language": "en",
"conversion": {
"operation": "FP32 -> BF16 cast",
"method": "torch.Tensor.to(dtype=torch.bfloat16)",
"rounding": "IEEE 754 round-to-nearest-even (PyTorch default)",
"lossless_for_inference": true,
"reason": "BF16 has same 8-bit exponent as FP32; 7-bit mantissa sufficient for weight storage"
},
"purpose": "Bandwidth-friendly BF16 reference checkpoint for downstream quantization and inference"
}

3
tokenizer.json Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:be75606093db2094d7cd20f3c2f385c212750648bd6ea4fb2bf507a6a4c55506
size 11422650

14
tokenizer_config.json Normal file

@@ -0,0 +1,14 @@
{
"add_prefix_space": false,
"backend": "tokenizers",
"bos_token": null,
"clean_up_tokenization_spaces": false,
"eos_token": "<|im_end|>",
"errors": "replace",
"is_local": true,
"model_max_length": 131072,
"pad_token": "<|endoftext|>",
"split_special_tokens": false,
"tokenizer_class": "Qwen2Tokenizer",
"unk_token": null
}