From e3908e720062aa0eb3223e0d993c1e3856eb008c Mon Sep 17 00:00:00 2001
From: ModelHub XC
Date: Sat, 18 Apr 2026 14:01:34 +0800
Subject: [PATCH] Initialize project; model provided by the ModelHub XC
 community
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Model: jamesjk4242/insighta-mandala-v13
Source: Original Platform
---
 .gitattributes                   | 38 +++++++++++++
 README.md                        | 78 ++++++++++++++++++++++++++++++
 chat_template.jinja              | 97 ++++++++++++++++++++++++++++++++++++++++
 config.json                      | 71 ++++++++++++++++++++++++++
 generation_config.json           | 13 +++++
 insighta-mandala-v13-Q4_K_M.gguf |  3 +
 insighta-mandala-v13-Q8_0.gguf   |  3 +
 model.safetensors                |  3 +
 tokenizer.json                   |  3 +
 tokenizer_config.json            | 15 +++++
 10 files changed, 324 insertions(+)
 create mode 100644 .gitattributes
 create mode 100644 README.md
 create mode 100644 chat_template.jinja
 create mode 100644 config.json
 create mode 100644 generation_config.json
 create mode 100644 insighta-mandala-v13-Q4_K_M.gguf
 create mode 100644 insighta-mandala-v13-Q8_0.gguf
 create mode 100644 model.safetensors
 create mode 100644 tokenizer.json
 create mode 100644 tokenizer_config.json

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..051b384
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,38 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
+insighta-mandala-v13-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+insighta-mandala-v13-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..e947db6
--- /dev/null
+++ b/README.md
@@ -0,0 +1,78 @@
+---
+pipeline_tag: text-generation
+library_name: transformers
+tags:
+  - qwen3
+  - gguf
+  - mandala
+  - education
+  - fine-tuned
+language:
+  - ko
+  - en
+license: apache-2.0
+---
+
+# Insighta Mandala v13
+
+Fine-tuned Qwen3-4B model for generating mandala learning plans in JSON
format.
+
+## Model Details
+
+- **Base model**: Qwen/Qwen3-4B
+- **Fine-tuning**: LoRA on mandala learning plan generation task
+- **Languages**: Korean, English
+- **Output format**: Structured JSON (mandala chart format)
+
+## Available Quantizations
+
+| Format | Size | Description |
+|--------|------|-------------|
+| `model.safetensors` | ~8GB | Full F16 weights |
+| `insighta-mandala-v13-Q8_0.gguf` | ~4GB | Q8_0 quantized GGUF |
+| `insighta-mandala-v13-Q4_K_M.gguf` | ~2.4GB | Q4_K_M quantized GGUF (recommended for CPU) |
+
+## Usage
+
+### With llama-cpp-python
+
+```python
+from llama_cpp import Llama
+
+llm = Llama(model_path="insighta-mandala-v13-Q4_K_M.gguf", n_ctx=4096)
+output = llm(
+    "<|im_start|>user\nTOEFL 100점 만다라트 차트를 만들어줘<|im_end|>\n<|im_start|>assistant\n",
+    max_tokens=2048,
+    temperature=0.7,
+)
+print(output["choices"][0]["text"])
+```
+
+### With HF Space API
+
+```bash
+curl -X POST https://jamesjk4242-insighta-mandala-v13-api.hf.space/api/predict \
+  -H "Content-Type: application/json" \
+  -d '{"data": ["TOEFL 100점 만다라트 차트를 만들어줘", "You are a helpful assistant that generates mandala learning plans in JSON format.", 2048, 0.7, 0.9, true]}'
+```
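+
+### With transformers
+
+The full-precision `model.safetensors` weights can also be loaded with the
+`transformers` library. The snippet below is a minimal sketch of the standard
+generation flow (v5-style `dtype` argument); adjust it to your hardware:
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model_id = "jamesjk4242/insighta-mandala-v13"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto", device_map="auto")
+
+# Build the prompt with the bundled chat template (chat_template.jinja).
+messages = [{"role": "user", "content": "TOEFL 100점 만다라트 차트를 만들어줘"}]
+inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
+
+outputs = model.generate(inputs, max_new_tokens=2048, do_sample=True, temperature=0.7, top_p=0.9)
+print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
+```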
diff --git a/chat_template.jinja b/chat_template.jinja
new file mode 100644
index 0000000..ba89998
--- /dev/null
+++ b/chat_template.jinja
@@ -0,0 +1,97 @@
+{%- if tools %}
+    {{- '<|im_start|>system\n' }}
+    {%- if messages[0].role == 'system' %}
+        {{- messages[0].content + '\n\n' }}
+    {%- endif %}
+    {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+    {%- for tool in tools %}
+        {{- "\n" }}
+        {{- tool | tojson }}
+    {%- endfor %}
+    {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+{%- else %}
+    {%- if messages[0].role == 'system' %}
+        {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
+    {%- endif %}
+{%- endif %}
+{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+{%- for forward_message in messages %}
+    {%- set index = (messages|length - 1) - loop.index0 %}
+    {%- set message = messages[index] %}
+    {%- set tool_start = '<tool_response>' %}
+    {%- set tool_start_length = tool_start|length %}
+    {%- set start_of_message = message.content[:tool_start_length] %}
+    {%- set tool_end = '</tool_response>' %}
+    {%- set tool_end_length = tool_end|length %}
+    {%- set start_pos = (message.content|length) - tool_end_length %}
+    {%- if start_pos < 0 %}
+        {%- set start_pos = 0 %}
+    {%- endif %}
+    {%- set end_of_message = message.content[start_pos:] %}
+    {%- if ns.multi_step_tool and message.role == "user" and not(start_of_message == tool_start and end_of_message == tool_end) %}
+        {%- set ns.multi_step_tool = false %}
+        {%- set ns.last_query_index = index %}
+    {%- endif %}
+{%- endfor %}
+{%- for message in messages %}
+    {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+        {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+    {%- elif message.role == "assistant" %}
+        {%- set content = message.content %}
+        {%- set reasoning_content = '' %}
+        {%- if message.reasoning_content is defined and message.reasoning_content is not none %}
+            {%- set reasoning_content = message.reasoning_content %}
+        {%- else %}
+            {%- if '</think>' in message.content %}
+                {%- set content = (message.content.split('</think>')|last).lstrip('\n') %}
+                {%- set reasoning_content = (message.content.split('</think>')|first).rstrip('\n') %}
+                {%- set reasoning_content = (reasoning_content.split('<think>')|last).lstrip('\n') %}
+            {%- endif %}
+        {%- endif %}
+        {%- if loop.index0 > ns.last_query_index %}
+            {%- if loop.last or (not loop.last and reasoning_content) %}
+                {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
+            {%- else %}
+                {{- '<|im_start|>' + message.role + '\n' + content }}
+            {%- endif %}
+        {%- else %}
+            {{- '<|im_start|>' + message.role + '\n' + content }}
+        {%- endif %}
+        {%- if message.tool_calls %}
+            {%- for tool_call in message.tool_calls %}
+                {%- if (loop.first and content) or (not loop.first) %}
+                    {{- '\n' }}
+                {%- endif %}
+                {%- if tool_call.function %}
+                    {%- set tool_call = tool_call.function %}
+                {%- endif %}
+                {{- '<tool_call>\n{"name": "' }}
+                {{- tool_call.name }}
+                {{- '", "arguments": ' }}
+                {%- if tool_call.arguments is string %}
+                    {{- tool_call.arguments }}
+                {%- else %}
+                    {{- tool_call.arguments | tojson }}
+                {%- endif %}
+                {{- '}\n</tool_call>' }}
+            {%- endfor %}
+        {%- endif %}
+        {{- '<|im_end|>\n' }}
+    {%- elif message.role == "tool" %}
+        {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+            {{- '<|im_start|>user' }}
+        {%- endif %}
+        {{- '\n<tool_response>\n' }}
+        {{- message.content }}
+        {{- '\n</tool_response>' }}
+        {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+            {{- '<|im_end|>\n' }}
+        {%- endif %}
+    {%- endif %}
+{%- endfor %}
+{%- if add_generation_prompt %}
+    {{- '<|im_start|>assistant\n' }}
+    {%- if enable_thinking is defined and enable_thinking is false %}
+        {{- '<think>\n\n</think>\n\n' }}
+    {%- endif %}
+{%- endif %}
\ No newline at end of file
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..03fed76
--- /dev/null
+++ b/config.json
@@ -0,0 +1,71 @@
+{
+  "architectures": [
+    "Qwen3ForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 151643,
+  "dtype": "float16",
+  "eos_token_id": 151645,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 2560,
+  "initializer_range": 0.02,
+  "intermediate_size": 9728,
+  "layer_types": [
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention"
+  ],
+  "max_position_embeddings": 40960,
+  "max_window_layers": 36,
+  "model_type": "qwen3",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 36,
+  "num_key_value_heads": 8,
+  "pad_token_id": null,
+  "rms_norm_eps": 1e-06,
+  "rope_parameters": {
+    "rope_theta": 1000000,
+    "rope_type": "default"
+  },
+  "sliding_window": null,
+  "tie_word_embeddings": true,
+  "transformers_version": "5.5.0",
+  "use_cache": true,
+  "use_sliding_window": false,
+  "vocab_size": 151936
+}
diff --git a/generation_config.json b/generation_config.json
new file mode 100644
index 0000000..efdece3
--- /dev/null
+++ b/generation_config.json
@@ -0,0 +1,13 @@
+{
+  "bos_token_id": 151643,
+  "do_sample": true,
+  "eos_token_id": [
+    151645,
+    151643
+  ],
+  
"pad_token_id": 151643, + "temperature": 0.6, + "top_k": 20, + "top_p": 0.95, + "transformers_version": "5.5.0" +} diff --git a/insighta-mandala-v13-Q4_K_M.gguf b/insighta-mandala-v13-Q4_K_M.gguf new file mode 100644 index 0000000..f00dc3c --- /dev/null +++ b/insighta-mandala-v13-Q4_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a66edd9572256793cc863caecd6b52e6213b3ac1d08767ad6b6186b87baa5aa +size 2497280800 diff --git a/insighta-mandala-v13-Q8_0.gguf b/insighta-mandala-v13-Q8_0.gguf new file mode 100644 index 0000000..106748e --- /dev/null +++ b/insighta-mandala-v13-Q8_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e4a2453a542fa3417793ec5ad721b6f7e9d3013b823370b04fc96a4666043a4 +size 4280405280 diff --git a/model.safetensors b/model.safetensors new file mode 100644 index 0000000..5100560 --- /dev/null +++ b/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dec9242dce28d364da25be5ebdeb4378d250ce317c36ee310f42f48511c895ef +size 8044981680 diff --git a/tokenizer.json b/tokenizer.json new file mode 100644 index 0000000..7edcf72 --- /dev/null +++ b/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7430e9138b76e93fb6f93462394d236b411111aef53cb421ba97d2691040cca +size 11423114 diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000..8ce94f1 --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,15 @@ +{ + "add_prefix_space": false, + "backend": "tokenizers", + "bos_token": null, + "clean_up_tokenization_spaces": false, + "eos_token": "<|im_end|>", + "errors": "replace", + "is_local": true, + "model_max_length": 40960, + "pad_token": "<|PAD_TOKEN|>", + "padding_side": "left", + "split_special_tokens": false, + "tokenizer_class": "Qwen2Tokenizer", + "unk_token": null +}