From 12b9c293d2d0d185bf37c9297abd15279532c1f5 Mon Sep 17 00:00:00 2001
From: ModelHub XC
Date: Sat, 11 Apr 2026 05:22:56 +0800
Subject: [PATCH] Initialize project; model provided by the ModelHub XC
 community
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Model: rimon-dutta/Rimon-Math-3B-V1
Source: Original Platform
---
 .gitattributes         |  36 ++++++++
 README.md              | 165 +++++++++++++++++++++++++++++++++++++++++
 chat_template.jinja    | 139 ++++++++++++++++++++++++++++++++++
 config.json            |  37 +++++++++
 generation_config.json |  11 +++
 model.safetensors      |   3 +
 tokenizer.json         |   3 +
 tokenizer.model.bak    |   0
 tokenizer_config.json  |  16 ++++
 9 files changed, 410 insertions(+)
 create mode 100644 .gitattributes
 create mode 100644 README.md
 create mode 100644 chat_template.jinja
 create mode 100644 config.json
 create mode 100644 generation_config.json
 create mode 100644 model.safetensors
 create mode 100644 tokenizer.json
 create mode 100644 tokenizer.model.bak
 create mode 100644 tokenizer_config.json

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..52373fe
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,36 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..1faaa54
--- /dev/null
+++ b/README.md
@@ -0,0 +1,165 @@
+---
+license: mit
+base_model: meta-llama/Llama-3.2-3B-Instruct
+tags:
+- unsloth
+- llama-3.2
+- mathematics
+- reasoning
+- arithmetic
+- fine-tuned
+- rimon-dutta
+- logic
+- chain-of-thought
+- open-r1
+- conversational
+- text-generation-inference
+language:
+- en
+pipeline_tag: text-generation
+library_name: transformers
+datasets:
+- open-r1/OpenR1-Math-220k
+model_creator: Rimon Dutta
+model_name: Rimon-Math-3B-V1
+---
+
+# Rimon-Math-3B-V1
+
+**Rimon-Math-3B-V1** is a specialized 3-billion-parameter causal language model, fine-tuned for high-accuracy mathematical reasoning and logical problem-solving. Built on the **Llama-3.2-3B-Instruct** architecture and optimized using the **Unsloth** framework, this model excels at generating structured, step-by-step solutions (Chain-of-Thought).
+
+## Highlights
+- **Reasoning Focused:** Trained specifically to break down complex problems into logical steps.
+- **Lightweight & Efficient:** Optimized for consumer-grade GPUs (T4, RTX 3060+) and edge deployment.
+- **High Compatibility:** Works seamlessly with `transformers` and `vLLM` (see the sketch below), and supports `GGUF` conversion for local use.
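+
+As a quick illustration of the `vLLM` compatibility mentioned above, the offline engine can load the model directly by its id. This is a minimal sketch, assuming a recent `vllm` release; it is not part of the model's official documentation:
+```python
+from vllm import LLM, SamplingParams
+
+# Point vLLM's offline inference engine at the model id
+llm = LLM(model="rimon-dutta/Rimon-Math-3B-V1", dtype="bfloat16")
+params = SamplingParams(temperature=0.1, max_tokens=512)
+
+# chat() applies the model's own chat template before generating
+outputs = llm.chat(
+    [{"role": "user", "content": "Solve 2x + 5 = 17 and show each step."}],
+    sampling_params=params,
+)
+print(outputs[0].outputs[0].text)
+```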
+
+---
+
+## Model Capabilities
+The model is fine-tuned to handle various mathematical domains:
+- **Algebra:** Solving equations, inequalities, and systems of equations.
+- **Calculus:** Derivatives, integrals, and limit problems.
+- **Geometry & Trigonometry:** Properties of shapes and trigonometric identities.
+- **Logic & Arithmetic:** Multi-step word problems and sequence analysis.
+
+---
+
+### Training Metrics (Approximate)
+| Epoch | Step | Training Loss | Validation Loss | LR |
+|-------|------|---------------|-----------------|--------|
+| 1.0 | 1000 | 0.7104 | 0.6952 | 1.5e-4 |
+| 2.0 | 2000 | 0.5911 | 0.5843 | 5.0e-5 |
+| 3.0 | 3000 | 0.5244 | 0.5102 | 1.0e-5 |
+
+---
+
+## Usage Guide
+
+### Installation & Dependencies
+To run Rimon-Math-3B-V1 efficiently, ensure you have the latest versions of the following libraries installed. Run this command in your terminal or a notebook cell:
+```bash
+pip install -U transformers torch accelerate bitsandbytes sentencepiece
+```
+
+| Component | Minimum (4-bit) | Recommended (16-bit) |
+|-----------|-----------------|----------------------|
+| GPU | NVIDIA T4 / RTX 3050 (4 GB VRAM) | RTX 3060 / A100 (12 GB+ VRAM) |
+| RAM | 8 GB system RAM | 16 GB system RAM |
+| CUDA | 11.8 or higher | 12.1 or higher |
+
+### How to Use the Model
+You can load the model in two different modes, depending on your hardware resources.
+
+#### Option 1: 4-bit Quantization (Low VRAM Mode)
+Best for users on Google Colab (free T4) or laptops with limited GPU memory. This uses only ~3.5 GB of VRAM.
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+import torch
+
+model_id = "rimon-dutta/Rimon-Math-3B-V1"
+
+# 4-bit configuration for memory efficiency
+bnb_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_compute_dtype=torch.bfloat16,
+    bnb_4bit_quant_type="nf4",
+    bnb_4bit_use_double_quant=True
+)
+
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    quantization_config=bnb_config,
+    device_map="auto",
+    trust_remote_code=True
+)
+```
+
+#### Option 2: 16-bit Full Precision (High Accuracy Mode)
+Best for users with 8 GB+ VRAM (e.g., RTX 3060 12 GB or higher). This provides the most precise mathematical reasoning.
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+
+model_id = "rimon-dutta/Rimon-Math-3B-V1"
+
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    torch_dtype=torch.bfloat16,
+    device_map="auto"
+)
+```
+
+### Running Inference (Example)
+Once the model is loaded, you can solve math problems using the standard Llama 3.2 chat template.
+```python
+# Define your math problem
+messages = [
+    {"role": "system", "content": "You are a specialized math tutor. Explain step-by-step."},
+    {"role": "user", "content": "If x + 1/x = 3, find the value of x^5 + 1/x^5."}
+]
+
+# Apply the chat template; return_dict=True returns a dict of tensors
+# (input_ids and attention_mask) that can be unpacked into generate()
+inputs = tokenizer.apply_chat_template(
+    messages,
+    add_generation_prompt=True,
+    return_dict=True,
+    return_tensors="pt"
+).to(model.device)
+
+# Generate the response
+outputs = model.generate(
+    **inputs,
+    max_new_tokens=1024,
+    temperature=0.1,  # Low temperature is crucial for math accuracy
+    do_sample=True,
+    pad_token_id=tokenizer.eos_token_id
+)
+
+print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+```
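+
+For interactive tutoring sessions, you can stream tokens as they are generated instead of waiting for the full solution. A minimal sketch using the `TextStreamer` utility from `transformers`, reusing the `model`, `tokenizer`, and `inputs` objects from the example above (streaming is generic `transformers` behaviour, shown here as an illustration rather than a documented feature of this model):
+```python
+from transformers import TextStreamer
+
+# Prints decoded tokens to stdout as they are produced,
+# skipping the echoed prompt and special tokens
+streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
+
+model.generate(
+    **inputs,
+    max_new_tokens=1024,
+    temperature=0.1,
+    do_sample=True,
+    streamer=streamer,
+    pad_token_id=tokenizer.eos_token_id
+)
+```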
+
+### Troubleshooting Guide
+1. **GPU memory error (OOM):** If you get an "Out of Memory" error, restart your runtime and use Option 1 (4-bit).
+
+2. **BitsAndBytes issues:** If `load_in_4bit` fails, ensure you are running in a Linux-based environment (or WSL2 on Windows) and that `bitsandbytes` is up to date:
+
+```bash
+pip install -U bitsandbytes
+```
+3. **CUDA mismatch:** If you encounter a runtime error regarding CUDA versions, reinstall PyTorch with the correct index URL:
+
+```bash
+pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121
+```
+### Prompt Engineering Tips
+- Use a system prompt to control the reasoning style.
+- Keep the temperature between 0.1 and 0.3 for math tasks.
+- Always request a step-by-step explanation.
+- Avoid ambiguous wording in problems.
+
+## Author
+
+Rimon Dutta
+DevOps Engineer | AI & ML Learner
+Kotwali, Bangladesh
\ No newline at end of file
diff --git a/chat_template.jinja b/chat_template.jinja
new file mode 100644
index 0000000..d144a9d
--- /dev/null
+++ b/chat_template.jinja
@@ -0,0 +1,139 @@
+{{- bos_token }}
+{%- if custom_tools is defined %}
+    {%- set tools = custom_tools %}
+{%- endif %}
+{%- if not tools_in_user_message is defined %}
+    {%- set tools_in_user_message = true %}
+{%- endif %}
+{%- if not date_string is defined %}
+    {%- set date_string = "26 July 2024" %}
+{%- endif %}
+{%- if not tools is defined %}
+    {%- set tools = none %}
+{%- endif %}
+
+{#- This block extracts the system message, so we can slot it into the right place. #}
+{%- if messages[0]['role'] == 'system' %}
+    {%- set system_message = messages[0]['content'] %}
+    {%- set messages = messages[1:] %}
+{%- else %}
+    {%- set system_message = "" %}
+{%- endif %}
+
+{#- System message + builtin tools #}
+{{- "<|start_header_id|>system<|end_header_id|>
+
+" }}
+{%- if builtin_tools is defined or tools is not none %}
+    {{- "Environment: ipython
+" }}
+{%- endif %}
+{%- if builtin_tools is defined %}
+    {{- "Tools: " + builtin_tools | reject('equalto', 'code_interpreter') | join(", ") + "
+
+"}}
+{%- endif %}
+{{- "Cutting Knowledge Date: December 2023
+" }}
+{{- "Today Date: " + date_string + "
+
+" }}
+{%- if tools is not none and not tools_in_user_message %}
+    {{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }}
+    {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
+    {{- "Do not use variables. 
+ +" }} + {%- for t in tools %} + {{- t | tojson(indent=4) }} + {{- " + +" }} + {%- endfor %} +{%- endif %} +{{- system_message }} +{{- "<|eot_id|>" }} + +{#- Custom tools are passed in a user message with some extra guidance #} +{%- if tools_in_user_message and not tools is none %} + {#- Extract the first user message so we can plug it in here #} + {%- if messages | length != 0 %} + {%- set first_user_message = messages[0]['content'] %} + {%- set messages = messages[1:] %} + {%- else %} + {{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }} +{%- endif %} + {{- '<|start_header_id|>user<|end_header_id|> + +' -}} + {{- "Given the following functions, please respond with a JSON for a function call " }} + {{- "with its proper arguments that best answers the given prompt. + +" }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }} + {{- "Do not use variables. + +" }} + {%- for t in tools %} + {{- t | tojson(indent=4) }} + {{- " + +" }} + {%- endfor %} + {{- first_user_message + "<|eot_id|>"}} +{%- endif %} + +{%- for message in messages %} + {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %} + {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|> + +'+ message['content'] + '<|eot_id|>' }} + {%- elif 'tool_calls' in message %} + {%- if not message.tool_calls|length == 1 %} + {{- raise_exception("This model only supports single tool-calls at once!") }} + {%- endif %} + {%- set tool_call = message.tool_calls[0].function %} + {%- if builtin_tools is defined and tool_call.name in builtin_tools %} + {{- '<|start_header_id|>assistant<|end_header_id|> + +' -}} + {{- "<|python_tag|>" + tool_call.name + ".call(" }} + {%- for arg_name, arg_val in tool_call.arguments | items %} + {{- arg_name + '="' + arg_val + '"' }} + {%- if not loop.last %} + {{- ", " }} + {%- endif %} + {%- endfor %} + {{- ")" }} + {%- else %} + {{- '<|start_header_id|>assistant<|end_header_id|> + +' -}} + {{- '{"name": "' + tool_call.name + '", ' }} + {{- '"parameters": ' }} + {{- tool_call.arguments | tojson }} + {{- "}" }} + {%- endif %} + {%- if builtin_tools is defined %} + {#- This means we're in ipython mode #} + {{- "<|eom_id|>" }} + {%- else %} + {{- "<|eot_id|>" }} + {%- endif %} + {%- elif message.role == "tool" or message.role == "ipython" %} + {{- "<|start_header_id|>ipython<|end_header_id|> + +" }} + {%- if message.content is mapping or message.content is iterable %} + {{- message.content | tojson }} + {%- else %} + {{- message.content }} + {%- endif %} + {{- "<|eot_id|>" }} + {%- endif %} +{%- endfor %} +{%- if add_generation_prompt %} + {{- '<|start_header_id|>assistant<|end_header_id|> + +' }} +{%- endif %} diff --git a/config.json b/config.json new file mode 100644 index 0000000..3e17c1c --- /dev/null +++ b/config.json @@ -0,0 +1,37 @@ +{ + "architectures": [ + "LlamaForCausalLM" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 128000, + "dtype": "float16", + "eos_token_id": 128001, + "head_dim": 128, + "hidden_act": "silu", + "hidden_size": 3072, + "initializer_range": 0.02, + "intermediate_size": 8192, + "max_position_embeddings": 131072, + "mlp_bias": false, + "model_type": "llama", + "num_attention_heads": 24, + "num_hidden_layers": 28, + "num_key_value_heads": 8, + "pad_token_id": 128004, + "pretraining_tp": 1, + "rms_norm_eps": 1e-05, + "rope_parameters": { + "factor": 32.0, + "high_freq_factor": 4.0, + 
"low_freq_factor": 1.0, + "original_max_position_embeddings": 8192, + "rope_theta": 500000.0, + "rope_type": "llama3" + }, + "tie_word_embeddings": true, + "transformers_version": "5.0.0", + "unsloth_fixed": true, + "use_cache": true, + "vocab_size": 128256 +} diff --git a/generation_config.json b/generation_config.json new file mode 100644 index 0000000..00d0073 --- /dev/null +++ b/generation_config.json @@ -0,0 +1,11 @@ +{ + "_from_model_config": true, + "bos_token_id": 128000, + "do_sample": true, + "eos_token_id": 128001, + "max_length": 131072, + "pad_token_id": 128004, + "temperature": 0.6, + "top_p": 0.9, + "transformers_version": "5.0.0" +} diff --git a/model.safetensors b/model.safetensors new file mode 100644 index 0000000..0bebbea --- /dev/null +++ b/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c0fbf70f907aa2e71b76a1067bfd36982f4574fe372c3690254551fe37183cf +size 6425528856 diff --git a/tokenizer.json b/tokenizer.json new file mode 100644 index 0000000..1c1d8d5 --- /dev/null +++ b/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b +size 17209920 diff --git a/tokenizer.model.bak b/tokenizer.model.bak new file mode 100644 index 0000000..e69de29 diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000..dc065cc --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,16 @@ +{ + "backend": "tokenizers", + "bos_token": "<|begin_of_text|>", + "clean_up_tokenization_spaces": true, + "eos_token": "<|end_of_text|>", + "is_local": true, + "model_input_names": [ + "input_ids", + "attention_mask" + ], + "model_max_length": 131072, + "pad_token": "<|finetune_right_pad_id|>", + "padding_side": "left", + "tokenizer_class": "TokenizersBackend", + "unk_token": null +}