Initialize the project; model provided by the ModelHub XC community

Model: mschill/dsa_llm
Source: Original Platform
Commit: c3147769c2
Author: ModelHub XC
Date: 2026-04-13 02:22:57 +08:00
15 changed files with 1315 additions and 0 deletions

.gitattributes vendored Normal file

@@ -0,0 +1,37 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text
gpt-oss-20b.MXFP4.gguf filter=lfs diff=lfs merge=lfs -text
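Every pattern above routes matching files through Git LFS rather than storing them in the repository itself; lines like these are what `git lfs track "<pattern>"` appends. A minimal sketch of how such patterns select paths, using Python's fnmatch (illustrative only; Git's wildmatch rules differ slightly, e.g. around `**` and `/`):

```python
from fnmatch import fnmatch

# A few patterns taken verbatim from the .gitattributes above.
LFS_PATTERNS = ["*.safetensors", "*.bin", "tokenizer.json", "gpt-oss-20b.MXFP4.gguf"]

def tracked_by_lfs(path: str) -> bool:
    # fnmatch only approximates Git's wildmatch, but matches flat names fine.
    return any(fnmatch(path, pat) for pat in LFS_PATTERNS)

print(tracked_by_lfs("gpt-oss-20b.MXFP4.gguf"))  # True  -> stored as an LFS pointer
print(tracked_by_lfs("config.json"))             # False -> stored in git directly
```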

Modelfile Normal file

@@ -0,0 +1,178 @@
FROM gpt-oss-20b.MXFP4.gguf
TEMPLATE """<|start|>system<|message|>You are ChatGPT, a large language model trained by OpenAI.
Knowledge cutoff: 2024-06
Current date: {{ currentDate }}
{{- if and .IsThinkSet .Think (ne .ThinkLevel "") }}
Reasoning: {{ .ThinkLevel }}
{{- else if or (not .IsThinkSet) (and .IsThinkSet .Think) }}
Reasoning: medium
{{- end }}
{{- $hasNonBuiltinTools := false }}
{{- if .Tools -}}
{{- $hasBrowserSearch := false }}
{{- $hasBrowserOpen := false }}
{{- $hasBrowserFind := false }}
{{- $hasPython := false }}
{{- range .Tools }}
{{- if eq .Function.Name "browser.search" -}}{{- $hasBrowserSearch = true -}}
{{- else if eq .Function.Name "browser.open" -}}{{- $hasBrowserOpen = true -}}
{{- else if eq .Function.Name "browser.find" -}}{{- $hasBrowserFind = true -}}
{{- else if eq .Function.Name "python" -}}{{- $hasPython = true -}}
{{- else }}{{ $hasNonBuiltinTools = true -}}
{{- end }}
{{- end }}
{{- if or $hasBrowserSearch $hasBrowserOpen $hasBrowserFind $hasPython }}
# Tools
{{- if or $hasBrowserSearch $hasBrowserOpen $hasBrowserFind }}
## browser
// Tool for browsing.
// The `cursor` appears in brackets before each browsing display: `[{cursor}]`.
// Cite information from the tool using the following format:
// `【{cursor}†L{line_start}(-L{line_end})?】`, for example: `【6†L9-L11】` or `【8†L3】`.
// Do not quote more than 10 words directly from the tool output.
// sources=web (default: web)
namespace browser {
{{- if $hasBrowserSearch }}
// Searches for information related to `query` and displays `topn` results.
type search = (_: {
query: string,
topn?: number, // default: 10
source?: string,
}) => any;
{{- end }}
{{- if $hasBrowserOpen }}
// Opens the link `id` from the page indicated by `cursor` starting at line number `loc`, showing `num_lines` lines.
// Valid link ids are displayed with the formatting: `【{id}†.*】`.
// If `cursor` is not provided, the most recent page is implied.
// If `id` is a string, it is treated as a fully qualified URL associated with `source`.
// If `loc` is not provided, the viewport will be positioned at the beginning of the document or centered on the most relevant passage, if available.
// Use this function without `id` to scroll to a new location of an opened page.
type open = (_: {
id?: number | string, // default: -1
cursor?: number, // default: -1
loc?: number, // default: -1
num_lines?: number, // default: -1
view_source?: boolean, // default: false
source?: string,
}) => any;
{{- end }}
{{- if $hasBrowserFind }}
// Finds exact matches of `pattern` in the current page, or the page given by `cursor`.
type find = (_: {
pattern: string,
cursor?: number, // default: -1
}) => any;
{{- end }}
} // namespace browser
{{- end }}{{/* end if has browser tools */}}
{{- if $hasPython }}
## python
Use this tool to execute Python code in your chain of thought. The code will not be shown to the user. This tool should be used for internal reasoning, but not for code that is intended to be visible to the user (e.g. when creating plots, tables, or files).
When you send a message containing Python code to python, it will be executed in a stateful Jupyter notebook environment. python will respond with the output of the execution or time out after 120.0 seconds. The drive at '/mnt/data' can be used to save and persist user files. Internet access for this session is UNKNOWN. Depends on the cluster.
{{- end }}{{/* end if hasPython */}}
{{- end }}{{/* end if has any built-in tools */}}
{{- end }}{{/* end if .Tools */}}
# Valid channels: analysis, commentary, final. Channel must be included for every message.{{ if $hasNonBuiltinTools }}
Calls to these tools must go to the commentary channel: 'functions'.
{{- end -}}<|end|>{{/* end of system */ -}}
{{- if or $hasNonBuiltinTools .System -}}
<|start|>developer<|message|>{{- if $hasNonBuiltinTools }}# Tools
## functions
namespace functions {
{{- range .Tools }}
{{- if not (or (eq .Function.Name "browser.search") (eq .Function.Name "browser.open") (eq .Function.Name "browser.find") (eq .Function.Name "python")) }}
{{if .Function.Description }}
// {{ .Function.Description }}
{{- end }}
{{- if and .Function.Parameters.Properties (gt (len .Function.Parameters.Properties) 0) }}
type {{ .Function.Name }} = (_: {
{{- range $name, $prop := .Function.Parameters.Properties }}
{{- if $prop.Description }}
// {{ $prop.Description }}
{{- end }}
{{ $name }}: {{ if gt (len $prop.Type) 1 }}{{ range $i, $t := $prop.Type }}{{ if $i }} | {{ end }}{{ $t }}{{ end }}{{ else }}{{ index $prop.Type 0 }}{{ end }},
{{- end }}
}) => any;
{{- else }}
type {{ .Function.Name }} = () => any;
{{- end }}
{{- end }}{{/* end if not browser tool */}}
{{- end }}{{/* end of range .Tools */}}
} // namespace functions
{{- end }}{{/* end if hasNonBuiltinTools */}}
{{- if .System}}
# Instructions
{{ .System }}
{{- end -}}
<|end|>
{{- end -}}
{{- /* Find the index of the last user message */ -}}
{{- $lastUserIdx := -1 }}
{{- $prefillingContent := false }}
{{- $prefillingThinkingOnly := false }}
{{- range $i, $msg := .Messages }}
{{- $last := eq (len (slice $.Messages $i)) 1 -}}
{{- if eq $msg.Role "user" }}
{{- $lastUserIdx = $i }}
{{- end -}}
{{- if and $last (eq $msg.Role "assistant") (gt (len $msg.Content) 0) }}
{{- $prefillingContent = true }}
{{- else if and $last (eq $msg.Role "assistant") (gt (len $msg.Thinking) 0) }}
{{- $prefillingThinkingOnly = true }}
{{- end }}
{{- end -}}
{{- /* Now render messages */ -}}
{{- range $i, $msg := .Messages }}
{{- $last := eq (len (slice $.Messages $i)) 1 -}}
{{- if (ne $msg.Role "system") -}}
{{- if eq $msg.Role "tool" -}}
{{- if or (eq $msg.ToolName "python") (eq $msg.ToolName "browser.search") (eq $msg.ToolName "browser.open") (eq $msg.ToolName "browser.find") -}}
<|start|>{{ $msg.ToolName }} to=assistant<|message|>{{ $msg.Content }}<|end|>
{{- else -}}
<|start|>functions.{{ $msg.ToolName }} to=assistant<|message|>{{ $msg.Content }}<|end|>
{{- end -}}
{{- else if eq $msg.Role "assistant" -}}
{{- if and $msg.Thinking (gt $i $lastUserIdx) -}}{{- /* Show thinking only after last user message */ -}}
<|start|>assistant<|channel|>analysis<|message|>{{ $msg.Thinking }}{{- if not $prefillingThinkingOnly -}}<|end|>{{- end -}}
{{- end -}}
{{- if gt (len $msg.Content) 0 -}}
<|start|>assistant<|channel|>final<|message|>{{ $msg.Content }}{{- if not $prefillingContent -}}<|end|>{{- end -}}
{{- end -}}
{{- if gt (len $msg.ToolCalls) 0 -}}
{{- range $j, $toolCall := $msg.ToolCalls -}}
{{- $isBuiltin := or (eq $toolCall.Function.Name "python") (eq $toolCall.Function.Name "browser.search") (eq $toolCall.Function.Name "browser.open") (eq $toolCall.Function.Name "browser.find") -}}
<|start|>assistant<|channel|>{{ if $isBuiltin }}analysis{{ else }}commentary{{ end }} to={{ if not $isBuiltin}}functions.{{end}}{{ $toolCall.Function.Name }} <|constrain|>json<|message|>{{ $toolCall.Function.Arguments }}<|call|>
{{- end -}}
{{- end -}}
{{- else if eq $msg.Role "user" -}}
<|start|>{{ $msg.Role }}<|message|>{{ $msg.Content }}<|end|>
{{- end }}
{{- else }}
{{- end }}
{{- end -}}
{{- if not (or $prefillingContent $prefillingThinkingOnly) -}}
<|start|>assistant
{{- end -}}"""
PARAMETER temperature 1.0
PARAMETER top_k 0
PARAMETER top_p 1.0
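With the GGUF weights pulled from LFS, this Modelfile can be built locally with `ollama create dsa_llm -f Modelfile` and queried with `ollama run dsa_llm` (the tag `dsa_llm` is just a chosen name, not something the repo mandates). A minimal sketch using the `ollama` Python client:

```python
# Assumes a local Ollama server and a model built as: ollama create dsa_llm -f Modelfile
import ollama

resp = ollama.chat(
    model="dsa_llm",
    messages=[{"role": "user", "content": "Explain binary search trees briefly."}],
)
# Responses are subscriptable; the final-channel text lands in message.content.
print(resp["message"]["content"])
```

The sampling parameters at the end (temperature 1.0, top_k 0, top_p 1.0) match the defaults OpenAI recommends for gpt-oss, i.e. effectively unmodified sampling.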

README.md Normal file

@@ -0,0 +1,21 @@
---
base_model: unsloth/gpt-oss-20b-unsloth-bnb-4bit
tags:
- text-generation-inference
- transformers
- unsloth
- gpt_oss
license: apache-2.0
language:
- en
---
# Uploaded finetuned model
- **Developed by:** mschill
- **License:** apache-2.0
- **Finetuned from model:** unsloth/gpt-oss-20b-unsloth-bnb-4bit
This gpt_oss model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Hugging Face's TRL library.
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
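The README does not include the training script. For orientation, the following is a generic Unsloth + TRL SFT skeleton, not the author's actual code; the LoRA hyperparameters mirror adapter_config.json below (r=128, alpha=128, rsLoRA, the listed target modules), while the dataset is a hypothetical placeholder:

```python
# NOT the author's script: a sketch of Unsloth + TRL supervised fine-tuning.
from unsloth import FastLanguageModel
from datasets import Dataset
from trl import SFTConfig, SFTTrainer

model, tokenizer = FastLanguageModel.from_pretrained(
    "unsloth/gpt-oss-20b-unsloth-bnb-4bit", load_in_4bit=True,
)
model = FastLanguageModel.get_peft_model(
    model, r=128, lora_alpha=128, lora_dropout=0, use_rslora=True,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj", "lm_head"],
)
train_ds = Dataset.from_list([{"text": "placeholder training example"}])  # hypothetical data
trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,  # renamed processing_class in newer TRL releases
    train_dataset=train_ds,
    args=SFTConfig(max_steps=10, dataset_text_field="text"),
)
trainer.train()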

adapter_config.json Normal file

@@ -0,0 +1,51 @@
{
"alora_invocation_tokens": null,
"alpha_pattern": {},
"arrow_config": null,
"auto_mapping": {
"base_model_class": "GptOssForCausalLM",
"parent_library": "transformers.models.gpt_oss.modeling_gpt_oss",
"unsloth_fixed": true
},
"base_model_name_or_path": "unsloth/gpt-oss-20b-unsloth-bnb-4bit",
"bias": "none",
"corda_config": null,
"ensure_weight_tying": false,
"eva_config": null,
"exclude_modules": null,
"fan_in_fan_out": false,
"inference_mode": true,
"init_lora_weights": true,
"layer_replication": null,
"layers_pattern": null,
"layers_to_transform": null,
"loftq_config": {},
"lora_alpha": 128,
"lora_bias": false,
"lora_dropout": 0,
"megatron_config": null,
"megatron_core": "megatron.core",
"modules_to_save": null,
"peft_type": "LORA",
"peft_version": "0.18.1",
"qalora_group_size": 16,
"r": 128,
"rank_pattern": {},
"revision": null,
"target_modules": [
"q_proj",
"up_proj",
"down_proj",
"k_proj",
"v_proj",
"o_proj",
"gate_proj",
"lm_head"
],
"target_parameters": null,
"task_type": "CAUSAL_LM",
"trainable_token_indices": null,
"use_dora": false,
"use_qalora": false,
"use_rslora": true
}
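This config applies rank-128 rsLoRA to every attention and MLP projection plus lm_head. A minimal loading sketch with PEFT, assuming the adapter weights are published under the same repo id (mschill/dsa_llm):

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM

# base_model_name_or_path from the config above
base = AutoModelForCausalLM.from_pretrained(
    "unsloth/gpt-oss-20b-unsloth-bnb-4bit",
    torch_dtype=torch.bfloat16,
    device_map="auto",  # requires accelerate
)
# inference_mode is true in the config, so the adapter loads frozen
model = PeftModel.from_pretrained(base, "mschill/dsa_llm")
```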

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a38b44794f3b9c0cf6eeef9ada5d5340a20131b18e84545609a2296bb8f349dc
size 1517529216
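Each three-line stanza like this is a Git LFS pointer: the real payload lives in LFS storage, content-addressed by the sha256 oid, and the file name of this ~1.5 GB entry is not shown in the diff. After `git lfs pull`, a downloaded file can be checked against its pointer; a sketch (the file name here is a guess, as noted in the comment):

```python
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    # Stream the file so multi-GB shards don't need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while blob := f.read(chunk):
            h.update(blob)
    return h.hexdigest()

# oid from the pointer above; "adapter_model.safetensors" is a hypothetical
# name, since this diff does not display the actual file name.
expected = "a38b44794f3b9c0cf6eeef9ada5d5340a20131b18e84545609a2296bb8f349dc"
print(sha256_of("adapter_model.safetensors") == expected)
```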

chat_template.jinja Normal file

@@ -0,0 +1,315 @@
{# Copyright 2025-present Unsloth. Apache 2.0 License. Unsloth chat template fixes. Edited from ggml-org & OpenAI #}
{#-
In addition to the normal inputs of `messages` and `tools`, this template also accepts the
following kwargs:
- "builtin_tools": A list, can contain "browser" and/or "python".
- "model_identity": A string that optionally describes the model identity.
- "reasoning_effort": A string that describes the reasoning effort, defaults to "medium".
#}
{#- Tool Definition Rendering ============================================== #}
{%- macro render_typescript_type(param_spec, required_params, is_nullable=false) -%}
{%- if param_spec.type == "array" -%}
{%- if param_spec['items'] -%}
{%- if param_spec['items']['type'] == "string" -%}
{{- "string[]" }}
{%- elif param_spec['items']['type'] == "number" -%}
{{- "number[]" }}
{%- elif param_spec['items']['type'] == "integer" -%}
{{- "number[]" }}
{%- elif param_spec['items']['type'] == "boolean" -%}
{{- "boolean[]" }}
{%- else -%}
{%- set inner_type = render_typescript_type(param_spec['items'], required_params) -%}
{%- if inner_type == "object | object" or inner_type|length > 50 -%}
{{- "any[]" }}
{%- else -%}
{{- inner_type + "[]" }}
{%- endif -%}
{%- endif -%}
{%- if param_spec.nullable -%}
{{- " | null" }}
{%- endif -%}
{%- else -%}
{{- "any[]" }}
{%- if param_spec.nullable -%}
{{- " | null" }}
{%- endif -%}
{%- endif -%}
{%- elif param_spec.type is defined and param_spec.type is iterable and param_spec.type is not string and param_spec.type is not mapping and param_spec.type[0] is defined -%}
{#- Handle array of types like ["object", "object"] from Union[dict, list] #}
{%- if param_spec.type | length > 1 -%}
{{- param_spec.type | join(" | ") }}
{%- else -%}
{{- param_spec.type[0] }}
{%- endif -%}
{%- elif param_spec.oneOf -%}
{#- Handle oneOf schemas - check for complex unions and fallback to any #}
{%- set has_object_variants = false -%}
{%- for variant in param_spec.oneOf -%}
{%- if variant.type == "object" -%}
{%- set has_object_variants = true -%}
{%- endif -%}
{%- endfor -%}
{%- if has_object_variants and param_spec.oneOf|length > 1 -%}
{{- "any" }}
{%- else -%}
{%- for variant in param_spec.oneOf -%}
{{- render_typescript_type(variant, required_params) -}}
{%- if variant.description %}
{{- "// " + variant.description }}
{%- endif -%}
{%- if variant.default is defined %}
{{ "// default: " + variant.default|tojson }}
{%- endif -%}
{%- if not loop.last %}
{{- " | " }}
{% endif -%}
{%- endfor -%}
{%- endif -%}
{%- elif param_spec.type == "string" -%}
{%- if param_spec.enum -%}
{{- '"' + param_spec.enum|join('" | "') + '"' -}}
{%- else -%}
{{- "string" }}
{%- if param_spec.nullable %}
{{- " | null" }}
{%- endif -%}
{%- endif -%}
{%- elif param_spec.type == "number" -%}
{{- "number" }}
{%- elif param_spec.type == "integer" -%}
{{- "number" }}
{%- elif param_spec.type == "boolean" -%}
{{- "boolean" }}
{%- elif param_spec.type == "object" -%}
{%- if param_spec.properties -%}
{{- "{\n" }}
{%- for prop_name, prop_spec in param_spec.properties.items() -%}
{{- prop_name -}}
{%- if prop_name not in (param_spec.required or []) -%}
{{- "?" }}
{%- endif -%}
{{- ": " }}
{{ render_typescript_type(prop_spec, param_spec.required or []) }}
{%- if not loop.last -%}
{{-", " }}
{%- endif -%}
{%- endfor -%}
{{- "}" }}
{%- else -%}
{{- "object" }}
{%- endif -%}
{%- else -%}
{{- "any" }}
{%- endif -%}
{%- endmacro -%}
{%- macro render_tool_namespace(namespace_name, tools) -%}
{{- "## " + namespace_name + "\n\n" }}
{{- "namespace " + namespace_name + " {\n\n" }}
{%- for tool in tools %}
{%- set tool = tool.function %}
{{- "// " + tool.description + "\n" }}
{{- "type "+ tool.name + " = " }}
{%- if tool.parameters and tool.parameters.properties -%}
{{- "(_: " }}
{{- "{\n" }}
{%- for param_name, param_spec in tool.parameters.properties.items() %}
{{- "// " + param_spec.description + "\n" }}
{{- param_name }}
{%- if param_name not in (tool.parameters.required or []) -%}
{{- "?" }}
{%- endif -%}
{{- ": " }}
{{- render_typescript_type(param_spec, tool.parameters.required or []) }}
{%- if param_spec.default is defined -%}
{%- if param_spec.enum %}
{{- ", // default: " + param_spec.default }}
{%- elif param_spec.oneOf %}
{{- "// default: " + param_spec.default }}
{%- else %}
{{- ", // default: " + param_spec.default|tojson }}
{%- endif -%}
{%- endif -%}
{%- if not loop.last %}
{{- ",\n" }}
{%- else %}
{{- "\n" }}
{%- endif -%}
{%- endfor %}
{{- "}) => any;\n\n" }}
{%- else -%}
{{- "() => any;\n\n" }}
{%- endif -%}
{%- endfor %}
{{- "} // namespace " + namespace_name }}
{%- endmacro -%}
{%- macro render_builtin_tools(browser_tool, python_tool) -%}
{%- if browser_tool %}
{{- "## browser\n\n" }}
{{- "// Tool for browsing.\n" }}
{{- "// The `cursor` appears in brackets before each browsing display: `[{cursor}]`.\n" }}
{{- "// Cite information from the tool using the following format:\n" }}
{{- "// `【{cursor}†L{line_start}(-L{line_end})?】`, for example: `【6†L9-L11】` or `【8†L3】`.\n" }}
{{- "// Do not quote more than 10 words directly from the tool output.\n" }}
{{- "// sources=web (default: web)\n" }}
{{- "namespace browser {\n\n" }}
{{- "// Searches for information related to `query` and displays `topn` results.\n" }}
{{- "type search = (_: {\n" }}
{{- "query: string,\n" }}
{{- "topn?: number, // default: 10\n" }}
{{- "source?: string,\n" }}
{{- "}) => any;\n\n" }}
{{- "// Opens the link `id` from the page indicated by `cursor` starting at line number `loc`, showing `num_lines` lines.\n" }}
{{- "// Valid link ids are displayed with the formatting: `【{id}†.*】`.\n" }}
{{- "// If `cursor` is not provided, the most recent page is implied.\n" }}
{{- "// If `id` is a string, it is treated as a fully qualified URL associated with `source`.\n" }}
{{- "// If `loc` is not provided, the viewport will be positioned at the beginning of the document or centered on the most relevant passage, if available.\n" }}
{{- "// Use this function without `id` to scroll to a new location of an opened page.\n" }}
{{- "type open = (_: {\n" }}
{{- "id?: number | string, // default: -1\n" }}
{{- "cursor?: number, // default: -1\n" }}
{{- "loc?: number, // default: -1\n" }}
{{- "num_lines?: number, // default: -1\n" }}
{{- "view_source?: boolean, // default: false\n" }}
{{- "source?: string,\n" }}
{{- "}) => any;\n\n" }}
{{- "// Finds exact matches of `pattern` in the current page, or the page given by `cursor`.\n" }}
{{- "type find = (_: {\n" }}
{{- "pattern: string,\n" }}
{{- "cursor?: number, // default: -1\n" }}
{{- "}) => any;\n\n" }}
{{- "} // namespace browser\n\n" }}
{%- endif -%}
{%- if python_tool %}
{{- "## python\n\n" }}
{{- "Use this tool to execute Python code in your chain of thought. The code will not be shown to the user. This tool should be used for internal reasoning, but not for code that is intended to be visible to the user (e.g. when creating plots, tables, or files).\n\n" }}
{{- "When you send a message containing Python code to python, it will be executed in a stateful Jupyter notebook environment. python will respond with the output of the execution or time out after 120.0 seconds. The drive at '/mnt/data' can be used to save and persist user files. Internet access for this session is UNKNOWN. Depends on the cluster.\n\n" }}
{%- endif -%}
{%- endmacro -%}
{#- System Message Construction ============================================ #}
{%- macro build_system_message() -%}
{%- if model_identity is not defined %}
{{- "You are ChatGPT, a large language model trained by OpenAI.\n" -}}
{%- else %}
{{- model_identity }}
{%- endif %}
{{- "Knowledge cutoff: 2024-06\n" }}
{{- "Current date: " + strftime_now("%Y-%m-%d") + "\n\n" }}
{%- if reasoning_effort is not defined %}
{%- set reasoning_effort = "medium" %}
{%- endif %}
{{- "Reasoning: " + reasoning_effort + "\n\n" }}
{%- if builtin_tools is defined %}
{{- "# Tools\n\n" }}
{%- set available_builtin_tools = namespace(browser=false, python=false) %}
{%- for tool in builtin_tools %}
{%- if tool == "browser" %}
{%- set available_builtin_tools.browser = true %}
{%- elif tool == "python" %}
{%- set available_builtin_tools.python = true %}
{%- endif %}
{%- endfor %}
{{- render_builtin_tools(available_builtin_tools.browser, available_builtin_tools.python) }}
{%- endif -%}
{{- "# Valid channels: analysis, commentary, final. Channel must be included for every message." }}
{%- if tools is defined -%}
{{- "\nCalls to these tools must go to the commentary channel: 'functions'." }}
{%- endif -%}
{%- endmacro -%}
{#- Main Template Logic ================================================= #}
{#- Set defaults #}
{#- Render system message #}
{{- "<|start|>system<|message|>" }}
{{- build_system_message() }}
{{- "<|end|>" }}
{#- Extract developer message #}
{%- if messages[0].role == "developer" or messages[0].role == "system" %}
{%- set developer_message = messages[0].content %}
{%- set loop_messages = messages[1:] %}
{%- else %}
{%- set developer_message = "" %}
{%- set loop_messages = messages %}
{%- endif %}
{#- Render developer message #}
{%- if developer_message or tools %}
{{- "<|start|>developer<|message|>" }}
{%- if developer_message %}
{{- "# Instructions\n\n" }}
{{- developer_message }}
{%- endif %}
{%- if tools -%}
{{- "\n\n" }}
{{- "# Tools\n\n" }}
{{- render_tool_namespace("functions", tools) }}
{%- endif -%}
{{- "<|end|>" }}
{%- endif %}
{#- Render messages #}
{%- set last_tool_call = namespace(name=none) %}
{%- for message in loop_messages -%}
{#- At this point only assistant/user/tool messages should remain #}
{%- if message.role == 'assistant' -%}
{%- if "tool_calls" in message %}
{#- We assume max 1 tool call per message, and so we infer the tool call name #}
{#- in "tool" messages from the most recent assistant tool call name #}
{%- set tool_call = message.tool_calls[0] %}
{%- if tool_call.function %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{%- if message.content %}
{{- "<|start|>assistant<|channel|>analysis<|message|>" + message.content + "<|end|>" }}
{%- endif %}
{{- "<|start|>assistant to=" }}
{{- "functions." + tool_call.name + "<|channel|>commentary json<|message|>" }}
{{- tool_call.arguments|tojson }}
{{- "<|call|>" }}
{%- set last_tool_call.name = tool_call.name %}
{%- elif "thinking" in message and loop.last and not add_generation_prompt %}
{#- Only render the CoT if the final turn is an assistant turn and add_generation_prompt is false #}
{#- This is a situation that should only occur in training, never in inference. #}
{{- "<|start|>assistant<|channel|>analysis<|message|>" + message.thinking + "<|end|>" }}
{#- <|return|> indicates the end of generation, but <|end|> does not #}
{#- <|return|> should never be an input to the model, but we include it as the final token #}
{#- when training, so the model learns to emit it. #}
{{- "<|start|>assistant<|channel|>final<|message|>" + message.content + "<|return|>" }}
{%- set last_tool_call.name = none %}
{%- elif "thinking" in message %}
{#- CoT is dropped during all previous turns, so we never render it for inference #}
{{- "<|start|>assistant<|channel|>final<|message|>" + message.content + "<|end|>" }}
{%- set last_tool_call.name = none %}
{%- elif loop.last and not add_generation_prompt %}
{#- <|return|> indicates the end of generation, but <|end|> does not #}
{#- <|return|> should never be an input to the model, but we include it as the final token #}
{#- when training, so the model learns to emit it. #}
{{- "<|start|>assistant<|message|>" + message.content + "<|return|>" }}
{%- else %}
{{- "<|start|>assistant<|message|>" + message.content + "<|end|>" }}
{%- set last_tool_call.name = none %}
{%- endif %}
{%- elif message.role == 'tool' -%}
{%- if last_tool_call.name is none %}
{{- raise_exception("Message has tool role, but there was no previous assistant message with a tool call!") }}
{%- endif %}
{{- "<|start|>functions." + last_tool_call.name }}
{{- " to=assistant<|channel|>commentary<|message|>" + message.content|tojson + "<|end|>" }}
{%- else -%}
{{- "<|start|>user<|message|>" + message.content + "<|end|>" }}
{%- endif -%}
{%- endfor -%}
{#- Generation prompt #}
{%- if add_generation_prompt -%}
<|start|>assistant
{%- endif -%}
{# Copyright 2025-present Unsloth. Apache 2.0 License. Unsloth chat template fixes. Edited from ggml-org & OpenAI #}
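To see what this template actually emits, it can be rendered through `tokenizer.apply_chat_template`; extra kwargs such as `reasoning_effort`, `model_identity`, and `builtin_tools` are forwarded to the template, matching the inputs its header documents. A sketch, assuming the repo id mschill/dsa_llm resolves to these tokenizer files:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("mschill/dsa_llm")
prompt = tok.apply_chat_template(
    [{"role": "user", "content": "Traverse a BST in order."}],
    tokenize=False,
    add_generation_prompt=True,   # appends the trailing <|start|>assistant
    reasoning_effort="low",       # template kwarg; defaults to "medium"
)
print(prompt)  # <|start|>system<|message|>You are ChatGPT, ... <|start|>assistant
```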

config.json Normal file

@@ -0,0 +1,70 @@
{
"architectures": [
"GptOssForCausalLM"
],
"attention_bias": true,
"attention_dropout": 0.0,
"bos_token_id": 199998,
"torch_dtype": "bfloat16",
"eos_token_id": 200002,
"experts_per_token": 4,
"head_dim": 64,
"hidden_act": "silu",
"hidden_size": 2880,
"initial_context_length": 4096,
"initializer_range": 0.02,
"intermediate_size": 2880,
"layer_types": [
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention"
],
"max_position_embeddings": 131072,
"model_type": "gpt_oss",
"num_attention_heads": 64,
"num_experts_per_tok": 4,
"num_hidden_layers": 24,
"num_key_value_heads": 8,
"num_local_experts": 32,
"output_router_logits": false,
"pad_token_id": 200017,
"rms_norm_eps": 1e-05,
"rope_scaling": {
"beta_fast": 32.0,
"beta_slow": 1.0,
"factor": 32.0,
"original_max_position_embeddings": 4096,
"rope_type": "yarn",
"truncate": false
},
"rope_theta": 150000,
"router_aux_loss_coef": 0.9,
"sliding_window": 128,
"swiglu_limit": 7.0,
"tie_word_embeddings": false,
"transformers_version": "4.57.1",
"unsloth_version": "2026.1.4",
"use_cache": true,
"vocab_size": 201088
}
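A few structural facts are implicit in this config: the 24 layers alternate 128-token sliding-window and full attention, the 64 query heads share 8 KV heads (grouped-query attention, 8 queries per group), and YaRN rope scaling stretches the original 4096-token context by 32x to 131072. A quick sanity sketch over the file as shown:

```python
import json

cfg = json.load(open("config.json"))

# Layers alternate sliding-window and full attention, 12 pairs = 24 layers.
assert cfg["layer_types"] == ["sliding_attention", "full_attention"] * 12

# Grouped-query attention: 64 query heads over 8 KV heads.
assert cfg["num_attention_heads"] // cfg["num_key_value_heads"] == 8

# YaRN: 4096 original context * factor 32 = 131072 max positions.
rs = cfg["rope_scaling"]
assert rs["original_max_position_embeddings"] * int(rs["factor"]) == cfg["max_position_embeddings"]
```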

gpt-oss-20b.MXFP4.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9c108fd943f9b9d1ac853f4a457972beac0072dc6bba079cd09a506930ab4a5c
size 13792637024

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8b9201af8cf7bfb0579d8b03e49a17a0a8bb790eb4c4d00ab3f271e02ce81857
size 16487374472

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ae91f8d7d451689cc34d6c60582f7b8f2e1451465e417d2b5b7a72e40420da46
size 16493804200

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:589378f149abab9827cf39d2a2396957d5c789661844fb45e984f4df989fa7a0
size 8848382840

@@ -0,0 +1,416 @@
{
"metadata": {},
"weight_map": {
"model.layers.0.input_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.0.mlp.experts.down_proj": "model-00000-of-00002.safetensors",
"model.layers.0.mlp.experts.down_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.0.mlp.experts.gate_up_proj": "model-00000-of-00002.safetensors",
"model.layers.0.mlp.experts.gate_up_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.0.mlp.router.bias": "model-00000-of-00002.safetensors",
"model.layers.0.mlp.router.weight": "model-00000-of-00002.safetensors",
"model.layers.0.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.0.self_attn.k_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.0.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.0.self_attn.o_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.0.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.0.self_attn.q_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.0.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.0.self_attn.sinks": "model-00000-of-00002.safetensors",
"model.layers.0.self_attn.v_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.0.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.1.input_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.1.mlp.experts.down_proj": "model-00000-of-00002.safetensors",
"model.layers.1.mlp.experts.down_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.1.mlp.experts.gate_up_proj": "model-00000-of-00002.safetensors",
"model.layers.1.mlp.experts.gate_up_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.1.mlp.router.bias": "model-00000-of-00002.safetensors",
"model.layers.1.mlp.router.weight": "model-00000-of-00002.safetensors",
"model.layers.1.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.1.self_attn.k_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.1.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.1.self_attn.o_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.1.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.1.self_attn.q_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.1.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.1.self_attn.sinks": "model-00000-of-00002.safetensors",
"model.layers.1.self_attn.v_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.1.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.10.input_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.10.mlp.experts.down_proj": "model-00000-of-00002.safetensors",
"model.layers.10.mlp.experts.down_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.10.mlp.experts.gate_up_proj": "model-00000-of-00002.safetensors",
"model.layers.10.mlp.experts.gate_up_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.10.mlp.router.bias": "model-00000-of-00002.safetensors",
"model.layers.10.mlp.router.weight": "model-00000-of-00002.safetensors",
"model.layers.10.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.10.self_attn.k_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.10.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.10.self_attn.o_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.10.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.10.self_attn.q_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.10.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.10.self_attn.sinks": "model-00000-of-00002.safetensors",
"model.layers.10.self_attn.v_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.10.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.11.input_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.11.mlp.experts.down_proj": "model-00000-of-00002.safetensors",
"model.layers.11.mlp.experts.down_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.11.mlp.experts.gate_up_proj": "model-00000-of-00002.safetensors",
"model.layers.11.mlp.experts.gate_up_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.11.mlp.router.bias": "model-00000-of-00002.safetensors",
"model.layers.11.mlp.router.weight": "model-00000-of-00002.safetensors",
"model.layers.11.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.11.self_attn.k_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.11.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.11.self_attn.o_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.11.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.11.self_attn.q_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.11.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.11.self_attn.sinks": "model-00000-of-00002.safetensors",
"model.layers.11.self_attn.v_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.11.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.12.input_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.12.mlp.experts.down_proj": "model-00000-of-00002.safetensors",
"model.layers.12.mlp.experts.down_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.12.mlp.experts.gate_up_proj": "model-00000-of-00002.safetensors",
"model.layers.12.mlp.experts.gate_up_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.12.mlp.router.bias": "model-00000-of-00002.safetensors",
"model.layers.12.mlp.router.weight": "model-00000-of-00002.safetensors",
"model.layers.12.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.12.self_attn.k_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.12.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.12.self_attn.o_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.12.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.12.self_attn.q_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.12.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.12.self_attn.sinks": "model-00000-of-00002.safetensors",
"model.layers.12.self_attn.v_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.12.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.13.input_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.13.mlp.experts.down_proj": "model-00000-of-00002.safetensors",
"model.layers.13.mlp.experts.down_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.13.mlp.experts.gate_up_proj": "model-00000-of-00002.safetensors",
"model.layers.13.mlp.experts.gate_up_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.13.mlp.router.bias": "model-00000-of-00002.safetensors",
"model.layers.13.mlp.router.weight": "model-00000-of-00002.safetensors",
"model.layers.13.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.13.self_attn.k_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.13.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.13.self_attn.o_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.13.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.13.self_attn.q_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.13.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.13.self_attn.sinks": "model-00000-of-00002.safetensors",
"model.layers.13.self_attn.v_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.13.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.14.input_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.14.mlp.experts.down_proj": "model-00000-of-00002.safetensors",
"model.layers.14.mlp.experts.down_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.14.mlp.experts.gate_up_proj": "model-00000-of-00002.safetensors",
"model.layers.14.mlp.experts.gate_up_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.14.mlp.router.bias": "model-00000-of-00002.safetensors",
"model.layers.14.mlp.router.weight": "model-00000-of-00002.safetensors",
"model.layers.14.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.14.self_attn.k_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.14.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.14.self_attn.o_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.14.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.14.self_attn.q_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.14.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.14.self_attn.sinks": "model-00000-of-00002.safetensors",
"model.layers.14.self_attn.v_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.14.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.15.input_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.15.mlp.experts.down_proj": "model-00000-of-00002.safetensors",
"model.layers.15.mlp.experts.down_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.15.mlp.experts.gate_up_proj": "model-00000-of-00002.safetensors",
"model.layers.15.mlp.experts.gate_up_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.15.mlp.router.bias": "model-00000-of-00002.safetensors",
"model.layers.15.mlp.router.weight": "model-00000-of-00002.safetensors",
"model.layers.15.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.15.self_attn.k_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.15.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.15.self_attn.o_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.15.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.15.self_attn.q_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.15.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.15.self_attn.sinks": "model-00000-of-00002.safetensors",
"model.layers.15.self_attn.v_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.15.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.16.input_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.16.mlp.experts.down_proj": "model-00000-of-00002.safetensors",
"model.layers.16.mlp.experts.down_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.16.mlp.experts.gate_up_proj": "model-00000-of-00002.safetensors",
"model.layers.16.mlp.experts.gate_up_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.16.mlp.router.bias": "model-00000-of-00002.safetensors",
"model.layers.16.mlp.router.weight": "model-00000-of-00002.safetensors",
"model.layers.16.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.16.self_attn.k_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.16.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.16.self_attn.o_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.16.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.16.self_attn.q_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.16.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.16.self_attn.sinks": "model-00000-of-00002.safetensors",
"model.layers.16.self_attn.v_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.16.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.17.input_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.17.mlp.experts.down_proj": "model-00000-of-00002.safetensors",
"model.layers.17.mlp.experts.down_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.17.mlp.experts.gate_up_proj": "model-00000-of-00002.safetensors",
"model.layers.17.mlp.experts.gate_up_proj_bias": "model-00000-of-00002.safetensors",
"model.layers.17.mlp.router.bias": "model-00000-of-00002.safetensors",
"model.layers.17.mlp.router.weight": "model-00000-of-00002.safetensors",
"model.layers.17.post_attention_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.17.self_attn.k_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.17.self_attn.k_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.17.self_attn.o_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.17.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.17.self_attn.q_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.17.self_attn.q_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.17.self_attn.sinks": "model-00000-of-00002.safetensors",
"model.layers.17.self_attn.v_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.17.self_attn.v_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.18.input_layernorm.weight": "model-00000-of-00002.safetensors",
"model.layers.18.self_attn.k_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.18.self_attn.o_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.18.self_attn.o_proj.weight": "model-00000-of-00002.safetensors",
"model.layers.18.self_attn.q_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.18.self_attn.v_proj.bias": "model-00000-of-00002.safetensors",
"model.layers.18.mlp.experts.down_proj": "model-00001-of-00002.safetensors",
"model.layers.18.mlp.experts.down_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.18.mlp.experts.gate_up_proj": "model-00001-of-00002.safetensors",
"model.layers.18.mlp.experts.gate_up_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.18.mlp.router.bias": "model-00001-of-00002.safetensors",
"model.layers.18.mlp.router.weight": "model-00001-of-00002.safetensors",
"model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.18.self_attn.sinks": "model-00001-of-00002.safetensors",
"model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.19.mlp.experts.down_proj": "model-00001-of-00002.safetensors",
"model.layers.19.mlp.experts.down_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.19.mlp.experts.gate_up_proj": "model-00001-of-00002.safetensors",
"model.layers.19.mlp.experts.gate_up_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.19.mlp.router.bias": "model-00001-of-00002.safetensors",
"model.layers.19.mlp.router.weight": "model-00001-of-00002.safetensors",
"model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.19.self_attn.o_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.19.self_attn.sinks": "model-00001-of-00002.safetensors",
"model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.experts.down_proj": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.experts.down_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.experts.gate_up_proj": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.experts.gate_up_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.router.bias": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.router.weight": "model-00001-of-00002.safetensors",
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.o_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.sinks": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.20.mlp.experts.down_proj": "model-00001-of-00002.safetensors",
"model.layers.20.mlp.experts.down_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.20.mlp.experts.gate_up_proj": "model-00001-of-00002.safetensors",
"model.layers.20.mlp.experts.gate_up_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.20.mlp.router.bias": "model-00001-of-00002.safetensors",
"model.layers.20.mlp.router.weight": "model-00001-of-00002.safetensors",
"model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.20.self_attn.o_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.20.self_attn.sinks": "model-00001-of-00002.safetensors",
"model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.21.mlp.experts.down_proj": "model-00001-of-00002.safetensors",
"model.layers.21.mlp.experts.down_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.21.mlp.experts.gate_up_proj": "model-00001-of-00002.safetensors",
"model.layers.21.mlp.experts.gate_up_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.21.mlp.router.bias": "model-00001-of-00002.safetensors",
"model.layers.21.mlp.router.weight": "model-00001-of-00002.safetensors",
"model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.21.self_attn.o_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.21.self_attn.sinks": "model-00001-of-00002.safetensors",
"model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.22.mlp.experts.down_proj": "model-00001-of-00002.safetensors",
"model.layers.22.mlp.experts.down_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.22.mlp.experts.gate_up_proj": "model-00001-of-00002.safetensors",
"model.layers.22.mlp.experts.gate_up_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.22.mlp.router.bias": "model-00001-of-00002.safetensors",
"model.layers.22.mlp.router.weight": "model-00001-of-00002.safetensors",
"model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.22.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.22.self_attn.o_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.22.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.22.self_attn.sinks": "model-00001-of-00002.safetensors",
"model.layers.22.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.23.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.23.mlp.experts.down_proj": "model-00001-of-00002.safetensors",
"model.layers.23.mlp.experts.down_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.23.mlp.experts.gate_up_proj": "model-00001-of-00002.safetensors",
"model.layers.23.mlp.experts.gate_up_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.23.mlp.router.bias": "model-00001-of-00002.safetensors",
"model.layers.23.mlp.router.weight": "model-00001-of-00002.safetensors",
"model.layers.23.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.23.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.23.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.23.self_attn.o_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.23.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.23.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.23.self_attn.sinks": "model-00001-of-00002.safetensors",
"model.layers.23.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.23.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.experts.down_proj": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.experts.down_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.experts.gate_up_proj": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.experts.gate_up_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.router.bias": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.router.weight": "model-00001-of-00002.safetensors",
"model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.o_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.sinks": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.experts.down_proj": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.experts.down_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.experts.gate_up_proj": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.experts.gate_up_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.router.bias": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.router.weight": "model-00001-of-00002.safetensors",
"model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.o_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.sinks": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.experts.down_proj": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.experts.down_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.experts.gate_up_proj": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.experts.gate_up_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.router.bias": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.router.weight": "model-00001-of-00002.safetensors",
"model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.o_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.sinks": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.experts.gate_up_proj_bias": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.router.bias": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.router.weight": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.o_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.sinks": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"lm_head.weight": "model-00002-of-00002.safetensors",
"model.embed_tokens.weight": "model-00002-of-00002.safetensors",
"model.layers.6.mlp.experts.down_proj": "model-00002-of-00002.safetensors",
"model.layers.6.mlp.experts.down_proj_bias": "model-00002-of-00002.safetensors",
"model.layers.6.mlp.experts.gate_up_proj": "model-00002-of-00002.safetensors",
"model.layers.6.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.7.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.7.mlp.experts.down_proj": "model-00002-of-00002.safetensors",
"model.layers.7.mlp.experts.down_proj_bias": "model-00002-of-00002.safetensors",
"model.layers.7.mlp.experts.gate_up_proj": "model-00002-of-00002.safetensors",
"model.layers.7.mlp.experts.gate_up_proj_bias": "model-00002-of-00002.safetensors",
"model.layers.7.mlp.router.bias": "model-00002-of-00002.safetensors",
"model.layers.7.mlp.router.weight": "model-00002-of-00002.safetensors",
"model.layers.7.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.7.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
"model.layers.7.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.7.self_attn.o_proj.bias": "model-00002-of-00002.safetensors",
"model.layers.7.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.7.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
"model.layers.7.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.7.self_attn.sinks": "model-00002-of-00002.safetensors",
"model.layers.7.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
"model.layers.7.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.8.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.8.mlp.experts.down_proj": "model-00002-of-00002.safetensors",
"model.layers.8.mlp.experts.down_proj_bias": "model-00002-of-00002.safetensors",
"model.layers.8.mlp.experts.gate_up_proj": "model-00002-of-00002.safetensors",
"model.layers.8.mlp.experts.gate_up_proj_bias": "model-00002-of-00002.safetensors",
"model.layers.8.mlp.router.bias": "model-00002-of-00002.safetensors",
"model.layers.8.mlp.router.weight": "model-00002-of-00002.safetensors",
"model.layers.8.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.8.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
"model.layers.8.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.8.self_attn.o_proj.bias": "model-00002-of-00002.safetensors",
"model.layers.8.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.8.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
"model.layers.8.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.8.self_attn.sinks": "model-00002-of-00002.safetensors",
"model.layers.8.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
"model.layers.8.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.9.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.9.mlp.experts.down_proj": "model-00002-of-00002.safetensors",
"model.layers.9.mlp.experts.down_proj_bias": "model-00002-of-00002.safetensors",
"model.layers.9.mlp.experts.gate_up_proj": "model-00002-of-00002.safetensors",
"model.layers.9.mlp.experts.gate_up_proj_bias": "model-00002-of-00002.safetensors",
"model.layers.9.mlp.router.bias": "model-00002-of-00002.safetensors",
"model.layers.9.mlp.router.weight": "model-00002-of-00002.safetensors",
"model.layers.9.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.9.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
"model.layers.9.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.9.self_attn.o_proj.bias": "model-00002-of-00002.safetensors",
"model.layers.9.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.9.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
"model.layers.9.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.9.self_attn.sinks": "model-00002-of-00002.safetensors",
"model.layers.9.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
"model.layers.9.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
"model.norm.weight": "model-00002-of-00002.safetensors"
}
}
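This weight_map is the standard Hugging Face sharded-checkpoint index (tensor name → shard file). The file name is suppressed in this diff; by convention it would be model.safetensors.index.json, which the sketch below assumes. Resolving one tensor to its shard:

```python
import json
from safetensors import safe_open

# "model.safetensors.index.json" is the conventional name, assumed here.
index = json.load(open("model.safetensors.index.json"))

name = "model.layers.0.self_attn.q_proj.weight"
shard = index["weight_map"][name]  # -> "model-00000-of-00002.safetensors"

# Read just that tensor from the shard without loading the whole file.
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)
print(tensor.shape)
```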

special_tokens_map.json Normal file

@@ -0,0 +1,23 @@
{
"bos_token": {
"content": "<|startoftext|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"eos_token": {
"content": "<|return|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"pad_token": {
"content": "<|reserved_200017|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
}
}
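These entries line up with the token ids in config.json: bos_token_id 199998 (<|startoftext|>), eos_token_id 200002 (<|return|>), pad_token_id 200017 (<|reserved_200017|>). Using <|return|> as EOS matches the chat template's comments: <|return|> marks the end of a completed final answer, while <|end|> merely closes intermediate messages. A quick check, again assuming the repo id resolves to these files:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("mschill/dsa_llm")
print(tok.bos_token, tok.eos_token, tok.pad_token)
# Expected ids per config.json: 199998, 200002, 200017
print(tok.convert_tokens_to_ids(["<|startoftext|>", "<|return|>", "<|reserved_200017|>"]))
```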

tokenizer.json Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0614fe83cadab421296e664e1f48f4261fa8fef6e03e63bb75c20f38e37d07d3
size 27868174

tokenizer_config.json Normal file (186 lines)

File diff suppressed because one or more lines are too long