commit 3ad005dd965afcd9c4dc5988e52e4c0d63ada6ab
Author: ModelHub XC
Date:   Fri May 1 11:36:08 2026 +0800

    Initialize project; model provided by the ModelHub XC community
    Model: AMAImedia/Qwen3-8B-Nemotron-Orchestrator-NOESIS-BF16
    Source: Original Platform

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..52373fe
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,36 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..fcc6dd4
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,203 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2024 Alibaba Cloud
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..bc7b8f1
--- /dev/null
+++ b/README.md
@@ -0,0 +1,145 @@
+---
+license: other
+license_name: nvidia-open-model-license
+license_link: LICENSE
+language:
+- en
+library_name: transformers
+tags:
+- bf16
+- orchestration
+- tool-calling
+- noesis
+- dhcf-fno
+- qwen3
+base_model: nvidia/Nemotron-Orchestrator-8B
+quantized_by: AMAImedia
+pipeline_tag: text-generation
+---
+
+# Qwen3-8B-Nemotron-Orchestrator-NOESIS-BF16
+
+**BF16 reference checkpoint of [nvidia/Nemotron-Orchestrator-8B](https://huggingface.co/nvidia/Nemotron-Orchestrator-8B),
+cast from the original FP32 release with round-to-nearest-even.**
+
+Released as part of the **NOESIS Professional Multilingual Dubbing Automation Platform**
+(framework: DHCF-FNO, Deterministic Hybrid Control Framework for Frozen Neural Operators).
+
+- **Founder:** Ilia Bolotnikov
+- **Organization:** [AMAImedia.com](https://www.amaimedia.com)
+- **X (Twitter):** [@AMAImediacom](https://x.com/AMAImediacom)
+- **LinkedIn:** [Ilia Bolotnikov](https://www.linkedin.com/in/ilia-bolotnikov)
+- **Telegram:** [@djbionicl](https://t.me/djbionicl)
+- **NOESIS version:** v14.6
+- **Release date:** 2026-04
+
+---
+
+## ⚠️ License notice
+
+This model inherits the **NVIDIA Open Model License** from the upstream
+`nvidia/Nemotron-Orchestrator-8B`. The base model is designated by NVIDIA as
+**"for research and development only"**.
+
+This BF16 derivative is published as a bandwidth-friendly reference checkpoint
+for the broader research and development community. **Users are responsible
+for compliance with NVIDIA's license terms**; see the upstream model card for
+the full text. (The bundled `LICENSE` file carries the Apache-2.0 text
+inherited from the Qwen3 base model.)
+
+---
+
+## Why this BF16 release exists
+
+The original NVIDIA release ships in **FP32 (~32 GB on disk)**. Most modern
+inference and quantization tooling (HuggingFace Transformers, vLLM, SGLang,
+AutoAWQ, AutoGPTQ, llama.cpp BF16 conversion) immediately casts to BF16
+on load. Publishing a pre-cast BF16 checkpoint:
+
+- Halves download bandwidth (16 GB vs 32 GB)
+- Halves disk footprint
+- Skips a slow load-time cast for users
+- Provides a clean BF16 baseline for downstream quantization recipes
+
+The cast is performed via `torch.Tensor.to(dtype=torch.bfloat16)` with
+IEEE 754 round-to-nearest-even (PyTorch default). BF16 keeps the full 8-bit
+exponent range of FP32 but truncates the mantissa from 23 bits to 7; the
+rounding error this introduces is **negligible for inference-time use** of
+weight tensors.
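+
+For reference, the whole conversion is only a few lines. A minimal sketch,
+assuming local access to the upstream FP32 checkpoint (the output path is
+illustrative):
+
+```python
+# Minimal sketch of the FP32 -> BF16 cast described above (paths illustrative).
+import torch
+from transformers import AutoModelForCausalLM
+
+SRC = "nvidia/Nemotron-Orchestrator-8B"             # upstream FP32 release
+DST = "Qwen3-8B-Nemotron-Orchestrator-NOESIS-BF16"  # local output directory
+
+model = AutoModelForCausalLM.from_pretrained(SRC, torch_dtype=torch.float32)
+model = model.to(dtype=torch.bfloat16)              # round-to-nearest-even cast
+model.save_pretrained(DST, safe_serialization=True) # sharded .safetensors output
+```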
+
+---
+
+## Model summary
+
+| Property | Value |
+| --- | --- |
+| Base model | nvidia/Nemotron-Orchestrator-8B |
+| Underlying architecture | Qwen3-8B (decoder-only transformer, **dense, NOT MoE**) |
+| Source precision | FP32 |
+| This release precision | BF16 |
+| Vocab size | 151936 |
+| Language | English (per base model) |
+| Disk footprint | ~16 GB |
+| Inference VRAM | ~17 GB BF16 (fully resident on a 24 GB+ GPU) |
+
+For low-VRAM (6-12 GB) inference, see the AWQ INT4 sibling release:
+[amaimedia/Nemotron-Orchestrator-8B-Qwen3-AWQ-INT4-NOESIS](https://huggingface.co/amaimedia/Nemotron-Orchestrator-8B-Qwen3-AWQ-INT4-NOESIS).
+
+---
+
+## How to use
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+
+model_id = "amaimedia/Qwen3-8B-Nemotron-Orchestrator-NOESIS-BF16"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    torch_dtype=torch.bfloat16,  # weights are stored in BF16; no cast happens at load
+    device_map="auto",
+)
+
+prompt = "Plan a multi-step task: find recent AWQ papers, summarize the top three."
+inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+out = model.generate(**inputs, max_new_tokens=256, do_sample=False)
+print(tokenizer.decode(out[0], skip_special_tokens=True))
+```
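+
+The bundled chat template also supports tool calling, which is the model's
+orchestration interface. A hedged sketch, continuing from the variables above
+(the `web_search` tool schema is a made-up example, not part of this release):
+
+```python
+# Illustrative tool-calling round trip; `web_search` is a hypothetical tool.
+tools = [{
+    "type": "function",
+    "function": {
+        "name": "web_search",
+        "description": "Search the web and return result snippets.",
+        "parameters": {
+            "type": "object",
+            "properties": {"query": {"type": "string"}},
+            "required": ["query"],
+        },
+    },
+}]
+messages = [{"role": "user", "content": "Find recent AWQ papers."}]
+
+# The chat template injects the tool signatures into the system turn.
+text = tokenizer.apply_chat_template(
+    messages, tools=tools, add_generation_prompt=True, tokenize=False
+)
+inputs = tokenizer(text, return_tensors="pt").to(model.device)
+out = model.generate(**inputs, max_new_tokens=256)
+# Tool requests, if any, appear as <tool_call>{"name": ..., "arguments": ...}</tool_call>.
+print(tokenizer.decode(out[0][inputs.input_ids.shape[-1]:], skip_special_tokens=False))
+```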
+
+---
+
+## NOESIS context
+
+This BF16 checkpoint is the source artifact for the AWQ INT4 quantization
+used as the **English orchestration teacher** for NOESIS Specialist
+**M9-ORCH-4B** during knowledge distillation.
+
+NOESIS is a 9-specialist dubbing automation platform; see the NOESIS
+collection for the full specialist family.
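+
+For readers who want to produce a derivative along those lines, an AWQ INT4
+pass typically looks like the sketch below (AutoAWQ with common default
+settings; this is not the exact recipe behind the NOESIS release, and the
+output path is illustrative):
+
+```python
+# Sketch of an AWQ INT4 quantization pass with AutoAWQ (pip install autoawq).
+from awq import AutoAWQForCausalLM
+from transformers import AutoTokenizer
+
+src = "amaimedia/Qwen3-8B-Nemotron-Orchestrator-NOESIS-BF16"
+dst = "Qwen3-8B-Nemotron-Orchestrator-AWQ-INT4"  # illustrative output path
+
+model = AutoAWQForCausalLM.from_pretrained(src)
+tokenizer = AutoTokenizer.from_pretrained(src)
+
+model.quantize(
+    tokenizer,
+    quant_config={"zero_point": True, "q_group_size": 128, "w_bit": 4, "version": "GEMM"},
+)
+model.save_quantized(dst)       # INT4 weights + quantization metadata
+tokenizer.save_pretrained(dst)
+```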
"tool") %} + {{- '<|im_start|>user' }} + {%- endif %} + {{- '\n\n' }} + {{- content }} + {{- '\n' }} + {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %} + {{- '<|im_end|>\n' }} + {%- endif %} + {%- endif %} +{%- endfor %} +{%- if add_generation_prompt %} + {{- '<|im_start|>assistant\n' }} + {%- if enable_thinking is defined and enable_thinking is false %} + {{- '\n\n\n\n' }} + {%- endif %} +{%- endif %} \ No newline at end of file diff --git a/config.json b/config.json new file mode 100644 index 0000000..9200ada --- /dev/null +++ b/config.json @@ -0,0 +1,71 @@ +{ + "architectures": [ + "Qwen3ForCausalLM" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": null, + "dtype": "bfloat16", + "eos_token_id": 151645, + "head_dim": 128, + "hidden_act": "silu", + "hidden_size": 4096, + "initializer_range": 0.02, + "intermediate_size": 12288, + "layer_types": [ + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention", + "full_attention" + ], + "max_position_embeddings": 40960, + "max_window_layers": 36, + "model_type": "qwen3", + "num_attention_heads": 32, + "num_hidden_layers": 36, + "num_key_value_heads": 8, + "pad_token_id": 151643, + "rms_norm_eps": 1e-06, + "rope_parameters": { + "rope_theta": 1000000, + "rope_type": "default" + }, + "sliding_window": null, + "tie_word_embeddings": false, + "transformers_version": "5.6.0.dev0", + "use_cache": true, + "use_sliding_window": false, + "vocab_size": 151936 +} diff --git a/generation_config.json b/generation_config.json new file mode 100644 index 0000000..e4f1d31 --- /dev/null +++ b/generation_config.json @@ -0,0 +1,13 @@ +{ + "bos_token_id": 151643, + "do_sample": true, + "eos_token_id": [ + 151645, + 151643 + ], + "pad_token_id": 151643, + "temperature": 0.6, + "top_k": 20, + "top_p": 0.95, + "transformers_version": "4.51.3" +} diff --git a/model-00001-of-00005.safetensors b/model-00001-of-00005.safetensors new file mode 100644 index 0000000..e6b2b0a --- /dev/null +++ b/model-00001-of-00005.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d06b12dd140c4e473963ee45494f58e94256c0ee40ca6368b8790ec738594721 +size 3990952896 diff --git a/model-00002-of-00005.safetensors b/model-00002-of-00005.safetensors new file mode 100644 index 0000000..a49bbb3 --- /dev/null +++ b/model-00002-of-00005.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24ef581c3752aa9aa09028237513170d3c3d3f05e5110f86b51e63b97a3be35b +size 3900892880 diff --git a/model-00003-of-00005.safetensors b/model-00003-of-00005.safetensors new file mode 100644 index 0000000..1a1061e --- /dev/null +++ b/model-00003-of-00005.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:316f3e04a7a3566af5bcfb98128755ce2db9833bdab4eab19b415b440c743dac +size 3959604792 diff --git a/model-00004-of-00005.safetensors 
b/model-00004-of-00005.safetensors new file mode 100644 index 0000000..fd4eefb --- /dev/null +++ b/model-00004-of-00005.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ca6f8d447764a0399505429be323ff57f56f7c11f93347eb219da73b7e65d2c +size 3959604784 diff --git a/model-00005-of-00005.safetensors b/model-00005-of-00005.safetensors new file mode 100644 index 0000000..7f759e0 --- /dev/null +++ b/model-00005-of-00005.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b9dc168fa72fc43248c133b3dd7b3074f37712c2ee68b96dbbd808fcf802a57 +size 570461432 diff --git a/model.safetensors.index.json b/model.safetensors.index.json new file mode 100644 index 0000000..7603ab1 --- /dev/null +++ b/model.safetensors.index.json @@ -0,0 +1,407 @@ +{ + "metadata": { + "total_parameters": 8190735360, + "total_size": 16381470720 + }, + "weight_map": { + "lm_head.weight": "model-00001-of-00005.safetensors", + "model.embed_tokens.weight": "model-00001-of-00005.safetensors", + "model.layers.0.input_layernorm.weight": "model-00001-of-00005.safetensors", + "model.layers.0.mlp.down_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.0.mlp.up_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00005.safetensors", + "model.layers.0.self_attn.k_norm.weight": "model-00001-of-00005.safetensors", + "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.0.self_attn.q_norm.weight": "model-00001-of-00005.safetensors", + "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.1.input_layernorm.weight": "model-00001-of-00005.safetensors", + "model.layers.1.mlp.down_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.1.mlp.up_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00005.safetensors", + "model.layers.1.self_attn.k_norm.weight": "model-00001-of-00005.safetensors", + "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.1.self_attn.q_norm.weight": "model-00001-of-00005.safetensors", + "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.10.input_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.10.mlp.down_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.10.mlp.up_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.10.self_attn.k_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.10.self_attn.q_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.10.self_attn.q_proj.weight": 
"model-00002-of-00005.safetensors", + "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.11.input_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.11.mlp.down_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.11.mlp.up_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.11.self_attn.k_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.11.self_attn.q_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.12.input_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.12.mlp.down_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.12.mlp.up_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.12.self_attn.k_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.12.self_attn.q_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.13.input_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.13.mlp.down_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.13.mlp.up_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.13.self_attn.k_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.13.self_attn.q_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.14.input_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.14.mlp.down_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.14.mlp.gate_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.14.mlp.up_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.14.post_attention_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.14.self_attn.k_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.14.self_attn.k_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.14.self_attn.o_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.14.self_attn.q_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.14.self_attn.q_proj.weight": 
"model-00003-of-00005.safetensors", + "model.layers.14.self_attn.v_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.15.input_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.15.mlp.down_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.15.mlp.gate_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.15.mlp.up_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.15.post_attention_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.15.self_attn.k_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.15.self_attn.k_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.15.self_attn.o_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.15.self_attn.q_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.15.self_attn.q_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.15.self_attn.v_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.16.input_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.16.mlp.down_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.16.mlp.up_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.16.self_attn.k_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.16.self_attn.k_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.16.self_attn.q_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.16.self_attn.q_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.16.self_attn.v_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.17.input_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.17.mlp.down_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.17.mlp.up_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.17.self_attn.k_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.17.self_attn.q_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.18.input_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.18.mlp.down_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.18.mlp.up_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.18.self_attn.k_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.18.self_attn.q_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.18.self_attn.q_proj.weight": 
"model-00003-of-00005.safetensors", + "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.19.input_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.19.mlp.down_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.19.mlp.up_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.19.self_attn.k_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.19.self_attn.q_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.2.input_layernorm.weight": "model-00001-of-00005.safetensors", + "model.layers.2.mlp.down_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.2.mlp.up_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00005.safetensors", + "model.layers.2.self_attn.k_norm.weight": "model-00001-of-00005.safetensors", + "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.2.self_attn.q_norm.weight": "model-00001-of-00005.safetensors", + "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.20.input_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.20.mlp.down_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.20.mlp.up_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.20.self_attn.k_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.20.self_attn.q_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.21.input_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.21.mlp.down_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.21.mlp.up_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.21.self_attn.k_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.21.self_attn.q_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.21.self_attn.q_proj.weight": 
"model-00003-of-00005.safetensors", + "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.22.input_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.22.mlp.down_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.22.mlp.up_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.22.self_attn.k_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.22.self_attn.q_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.23.input_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.23.mlp.down_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.23.mlp.up_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.23.self_attn.k_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.23.self_attn.q_norm.weight": "model-00003-of-00005.safetensors", + "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.24.input_layernorm.weight": "model-00003-of-00005.safetensors", + "model.layers.24.mlp.down_proj.weight": "model-00003-of-00005.safetensors", + "model.layers.24.mlp.gate_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.24.mlp.up_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.24.post_attention_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.24.self_attn.k_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.24.self_attn.k_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.24.self_attn.o_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.24.self_attn.q_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.24.self_attn.q_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.24.self_attn.v_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.25.input_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.25.mlp.down_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.25.mlp.gate_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.25.mlp.up_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.25.post_attention_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.25.self_attn.k_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.25.self_attn.k_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.25.self_attn.o_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.25.self_attn.q_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.25.self_attn.q_proj.weight": 
"model-00004-of-00005.safetensors", + "model.layers.25.self_attn.v_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.26.input_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.26.mlp.down_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.26.mlp.gate_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.26.mlp.up_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.26.self_attn.k_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.26.self_attn.k_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.26.self_attn.o_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.26.self_attn.q_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.26.self_attn.q_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.26.self_attn.v_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.27.input_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.27.mlp.down_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.27.mlp.up_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.27.self_attn.k_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.27.self_attn.q_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.28.input_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.28.mlp.down_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.28.mlp.gate_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.28.mlp.up_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.28.post_attention_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.28.self_attn.k_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.28.self_attn.k_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.28.self_attn.o_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.28.self_attn.q_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.28.self_attn.q_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.28.self_attn.v_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.29.input_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.29.mlp.down_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.29.mlp.gate_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.29.mlp.up_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.29.post_attention_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.29.self_attn.k_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.29.self_attn.k_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.29.self_attn.o_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.29.self_attn.q_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.29.self_attn.q_proj.weight": 
"model-00004-of-00005.safetensors", + "model.layers.29.self_attn.v_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.3.input_layernorm.weight": "model-00001-of-00005.safetensors", + "model.layers.3.mlp.down_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.3.mlp.up_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00005.safetensors", + "model.layers.3.self_attn.k_norm.weight": "model-00001-of-00005.safetensors", + "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00005.safetensors", + "model.layers.3.self_attn.q_norm.weight": "model-00001-of-00005.safetensors", + "model.layers.3.self_attn.q_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.3.self_attn.v_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.30.input_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.30.mlp.down_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.30.mlp.gate_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.30.mlp.up_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.30.post_attention_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.30.self_attn.k_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.30.self_attn.k_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.30.self_attn.o_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.30.self_attn.q_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.30.self_attn.q_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.30.self_attn.v_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.31.input_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.31.mlp.down_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.31.mlp.gate_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.31.mlp.up_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.31.self_attn.k_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.31.self_attn.k_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.31.self_attn.o_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.31.self_attn.q_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.31.self_attn.q_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.31.self_attn.v_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.32.input_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.32.mlp.down_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.32.mlp.gate_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.32.mlp.up_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.32.post_attention_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.32.self_attn.k_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.32.self_attn.k_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.32.self_attn.o_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.32.self_attn.q_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.32.self_attn.q_proj.weight": 
"model-00004-of-00005.safetensors", + "model.layers.32.self_attn.v_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.33.input_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.33.mlp.down_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.33.mlp.gate_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.33.mlp.up_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.33.post_attention_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.33.self_attn.k_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.33.self_attn.k_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.33.self_attn.o_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.33.self_attn.q_norm.weight": "model-00004-of-00005.safetensors", + "model.layers.33.self_attn.q_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.33.self_attn.v_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.34.input_layernorm.weight": "model-00004-of-00005.safetensors", + "model.layers.34.mlp.down_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.34.mlp.gate_proj.weight": "model-00004-of-00005.safetensors", + "model.layers.34.mlp.up_proj.weight": "model-00005-of-00005.safetensors", + "model.layers.34.post_attention_layernorm.weight": "model-00005-of-00005.safetensors", + "model.layers.34.self_attn.k_norm.weight": "model-00005-of-00005.safetensors", + "model.layers.34.self_attn.k_proj.weight": "model-00005-of-00005.safetensors", + "model.layers.34.self_attn.o_proj.weight": "model-00005-of-00005.safetensors", + "model.layers.34.self_attn.q_norm.weight": "model-00005-of-00005.safetensors", + "model.layers.34.self_attn.q_proj.weight": "model-00005-of-00005.safetensors", + "model.layers.34.self_attn.v_proj.weight": "model-00005-of-00005.safetensors", + "model.layers.35.input_layernorm.weight": "model-00005-of-00005.safetensors", + "model.layers.35.mlp.down_proj.weight": "model-00005-of-00005.safetensors", + "model.layers.35.mlp.gate_proj.weight": "model-00005-of-00005.safetensors", + "model.layers.35.mlp.up_proj.weight": "model-00005-of-00005.safetensors", + "model.layers.35.post_attention_layernorm.weight": "model-00005-of-00005.safetensors", + "model.layers.35.self_attn.k_norm.weight": "model-00005-of-00005.safetensors", + "model.layers.35.self_attn.k_proj.weight": "model-00005-of-00005.safetensors", + "model.layers.35.self_attn.o_proj.weight": "model-00005-of-00005.safetensors", + "model.layers.35.self_attn.q_norm.weight": "model-00005-of-00005.safetensors", + "model.layers.35.self_attn.q_proj.weight": "model-00005-of-00005.safetensors", + "model.layers.35.self_attn.v_proj.weight": "model-00005-of-00005.safetensors", + "model.layers.4.input_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.4.mlp.down_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.4.mlp.gate_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.4.mlp.up_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.4.post_attention_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.4.self_attn.k_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.4.self_attn.k_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.4.self_attn.o_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.4.self_attn.q_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.4.self_attn.q_proj.weight": 
"model-00002-of-00005.safetensors", + "model.layers.4.self_attn.v_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.5.input_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.5.mlp.down_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.5.mlp.gate_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.5.mlp.up_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.5.self_attn.k_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.5.self_attn.k_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.5.self_attn.o_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.5.self_attn.q_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.5.self_attn.q_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.5.self_attn.v_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.6.input_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.6.mlp.down_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.6.mlp.up_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.6.self_attn.k_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.6.self_attn.q_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.7.input_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.7.mlp.down_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.7.mlp.up_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.7.self_attn.k_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.7.self_attn.q_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.8.input_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.8.mlp.down_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.8.mlp.up_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.8.self_attn.k_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.8.self_attn.q_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00005.safetensors", + 
"model.layers.8.self_attn.v_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.9.input_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.9.mlp.down_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.9.mlp.up_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00005.safetensors", + "model.layers.9.self_attn.k_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.9.self_attn.q_norm.weight": "model-00002-of-00005.safetensors", + "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00005.safetensors", + "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00005.safetensors", + "model.norm.weight": "model-00005-of-00005.safetensors" + } +} diff --git a/noesis_provenance.json b/noesis_provenance.json new file mode 100644 index 0000000..9adf750 --- /dev/null +++ b/noesis_provenance.json @@ -0,0 +1,21 @@ +{ + "noesis_version": "v14.1", + "framework": "DHCF-FNO", + "founder": "Ilia Bolotnikov", + "organization": "AMAImedia.com", + "source_model": "nvidia/Nemotron-Orchestrator-8B", + "source_format": "FP32 safetensors", + "source_license": "NVIDIA Open Model License (research and development only)", + "base_model": "Qwen/Qwen3-8B", + "architecture": "Qwen3ForCausalLM (dense decoder-only, NO MoE)", + "vocab_size": 151936, + "language": "en", + "conversion": { + "operation": "FP32 -> BF16 cast", + "method": "torch.Tensor.to(dtype=torch.bfloat16)", + "rounding": "IEEE 754 round-to-nearest-even (PyTorch default)", + "lossless_for_inference": true, + "reason": "BF16 has same 8-bit exponent as FP32; 7-bit mantissa sufficient for weight storage" + }, + "purpose": "Bandwidth-friendly BF16 reference checkpoint for downstream quantization and inference" +} \ No newline at end of file diff --git a/tokenizer.json b/tokenizer.json new file mode 100644 index 0000000..c7afbed --- /dev/null +++ b/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be75606093db2094d7cd20f3c2f385c212750648bd6ea4fb2bf507a6a4c55506 +size 11422650 diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000..c71ba0c --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,14 @@ +{ + "add_prefix_space": false, + "backend": "tokenizers", + "bos_token": null, + "clean_up_tokenization_spaces": false, + "eos_token": "<|im_end|>", + "errors": "replace", + "is_local": true, + "model_max_length": 131072, + "pad_token": "<|endoftext|>", + "split_special_tokens": false, + "tokenizer_class": "Qwen2Tokenizer", + "unk_token": null +}