commit 2065f00ff9a4c5dd06c20efbf66c04c17896f3f7 Author: ModelHub XC Date: Fri Apr 24 14:49:14 2026 +0800 初始化项目,由ModelHub XC社区提供模型 Model: aayanmishra-ml/Atlas-Pro-1.5B-Preview-GGUF Source: Original Platform diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..dccb58f --- /dev/null +++ b/.gitattributes @@ -0,0 +1,47 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +Atlas-Pro-1.5B-Preview.IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text 
+Atlas-Pro-1.5B-Preview.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text +Atlas-Pro-1.5B-Preview.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text +Atlas-Pro-1.5B-Preview.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text +Atlas-Pro-1.5B-Preview.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text +Atlas-Pro-1.5B-Preview.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text +Atlas-Pro-1.5B-Preview.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text +Atlas-Pro-1.5B-Preview.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text +Atlas-Pro-1.5B-Preview.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text +Atlas-Pro-1.5B-Preview.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text +Atlas-Pro-1.5B-Preview.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text +Atlas-Pro-1.5B-Preview.f16.gguf filter=lfs diff=lfs merge=lfs -text diff --git a/Atlas-Pro-1.5B-Preview.IQ4_XS.gguf b/Atlas-Pro-1.5B-Preview.IQ4_XS.gguf new file mode 100644 index 0000000..29acaf5 --- /dev/null +++ b/Atlas-Pro-1.5B-Preview.IQ4_XS.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7c2fac30821b0ae7aa24623c37af19826f5dffe717cc6b59a60d7020816717a +size 1026163712 diff --git a/Atlas-Pro-1.5B-Preview.Q2_K.gguf b/Atlas-Pro-1.5B-Preview.Q2_K.gguf new file mode 100644 index 0000000..53d18ca --- /dev/null +++ b/Atlas-Pro-1.5B-Preview.Q2_K.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dee5cb122c0045bbb1c7926e72cc7528b01f31f4536b43a05f94d701674227e9 +size 752881664 diff --git a/Atlas-Pro-1.5B-Preview.Q3_K_L.gguf b/Atlas-Pro-1.5B-Preview.Q3_K_L.gguf new file mode 100644 index 0000000..51e5ae9 --- /dev/null +++ b/Atlas-Pro-1.5B-Preview.Q3_K_L.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9675dc80bcf94f056e8e85c09458062c281007d6d2321dc4e3007f13268dff1d +size 980441600 diff --git a/Atlas-Pro-1.5B-Preview.Q3_K_M.gguf b/Atlas-Pro-1.5B-Preview.Q3_K_M.gguf new file mode 100644 index 0000000..dc69877 --- /dev/null +++ b/Atlas-Pro-1.5B-Preview.Q3_K_M.gguf @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:c86b2b8865775014d66e5eea6fddc2562054aa6dddcf23ac3440f8265c6c5108 +size 924457472 diff --git a/Atlas-Pro-1.5B-Preview.Q3_K_S.gguf b/Atlas-Pro-1.5B-Preview.Q3_K_S.gguf new file mode 100644 index 0000000..cca0bc6 --- /dev/null +++ b/Atlas-Pro-1.5B-Preview.Q3_K_S.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc030e7bf0fa7dc8bad154b9c2c40987ba93287bf5740fbed2c7c97bf6500beb +size 861223424 diff --git a/Atlas-Pro-1.5B-Preview.Q4_K_M.gguf b/Atlas-Pro-1.5B-Preview.Q4_K_M.gguf new file mode 100644 index 0000000..26b1c7e --- /dev/null +++ b/Atlas-Pro-1.5B-Preview.Q4_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7975206906f5365ba731da1ac6f34ba20bfa28edbd4279a94124880b796e4bd3 +size 1117322240 diff --git a/Atlas-Pro-1.5B-Preview.Q4_K_S.gguf b/Atlas-Pro-1.5B-Preview.Q4_K_S.gguf new file mode 100644 index 0000000..58d673e --- /dev/null +++ b/Atlas-Pro-1.5B-Preview.Q4_K_S.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8af987b0f112216c3ca29ae394a2516d40b12a2d0f1223c255c06cb58a1623c8 +size 1071586304 diff --git a/Atlas-Pro-1.5B-Preview.Q5_K_M.gguf b/Atlas-Pro-1.5B-Preview.Q5_K_M.gguf new file mode 100644 index 0000000..5f9f347 --- /dev/null +++ b/Atlas-Pro-1.5B-Preview.Q5_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:020480a9128b5b02396f790c4e060f03919734e70c6c9ccc7dd7643684e42ccd +size 1285495808 diff --git a/Atlas-Pro-1.5B-Preview.Q5_K_S.gguf b/Atlas-Pro-1.5B-Preview.Q5_K_S.gguf new file mode 100644 index 0000000..9419a4d --- /dev/null +++ b/Atlas-Pro-1.5B-Preview.Q5_K_S.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e2da074ef1dbc7e4668101b1f542e658fed63eb78b3fff1fea3cd9d02495857 +size 1259174912 diff --git a/Atlas-Pro-1.5B-Preview.Q6_K.gguf b/Atlas-Pro-1.5B-Preview.Q6_K.gguf new file mode 100644 index 0000000..869ec8b --- /dev/null +++ b/Atlas-Pro-1.5B-Preview.Q6_K.gguf @@ 
-0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2da2a01947dd88a6ea70167b4248b70416714751e445cac58ecab95c5af9ae1 +size 1464180224 diff --git a/Atlas-Pro-1.5B-Preview.Q8_0.gguf b/Atlas-Pro-1.5B-Preview.Q8_0.gguf new file mode 100644 index 0000000..86d2e61 --- /dev/null +++ b/Atlas-Pro-1.5B-Preview.Q8_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73c3bc2796f71ba242d412b930b6cfd2bdf38e6f7c9fdd781f40d5d781ae39ac +size 1894533632 diff --git a/Atlas-Pro-1.5B-Preview.f16.gguf b/Atlas-Pro-1.5B-Preview.f16.gguf new file mode 100644 index 0000000..f1bd583 --- /dev/null +++ b/Atlas-Pro-1.5B-Preview.f16.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58017f1bb609abac4b164ad7c934d371624e48e3a051be5fb8130ba27ceb1cfc +size 3560417792 diff --git a/Atlas-Pro.png b/Atlas-Pro.png new file mode 100644 index 0000000..c097e19 Binary files /dev/null and b/Atlas-Pro.png differ diff --git a/README.md b/README.md new file mode 100644 index 0000000..7cf650e --- /dev/null +++ b/README.md @@ -0,0 +1,151 @@ +--- +base_model: Spestly/Atlas-Pro-1.5B-Preview +tags: +- text-generation-inference +- transformers +- unsloth +- qwen2 +- trl +license: mit +language: +- en +- zh +- fr +- es +- pt +- de +- it +- ru +- ja +- ko +- vi +- th +- ar +- fa +- he +- tr +- cs +- pl +- hi +- bn +- ur +- id +- ms +- lo +- my +- ceb +- km +- tl +- nl +datasets: +- openai/gsm8k +- HuggingFaceH4/ultrachat_200k +library_name: transformers +extra_gated_prompt: "By accessing this model, you agree to comply with ethical usage guidelines and accept full responsibility for its applications. You will not use this model for harmful, malicious, or illegal activities, and you understand that the model's use is subject to ongoing monitoring for misuse. 
This model is provided 'AS IS' and agreeing to this means that you are responsible for all the outputs generated by you" +extra_gated_fields: + Name: text + Organization: text + Country: country + Date of Birth: date_picker + Intended Use: + type: select + options: + - Research + - Education + - Personal Development + - Commercial Use + - label: Other + value: other + I agree to use this model in accordance with all applicable laws and ethical guidelines: checkbox + I agree to use this model under the MIT licence: checkbox +--- +![Header](./Atlas-Pro.png) +# **Atlas Pro** + +### **Model Overview** +**Atlas Pro** (Previously known as '🏆 Atlas-Experiment 0403 🧪' in AtlasUI) is an advanced language model (LLM) built on top of **Atlas Flash**. It's designed to provide exceptional performance for professional tasks like coding, mathematics, and scientific problem-solving. Atlas Pro builds on Atlas Flash by adding more fine-tuning and specialization, making it perfect for researchers and advanced users. + +--- + +### **Key Features** +- **Improved Problem-Solving:** Handles tricky tasks in programming, math, and sciences better than most models. +- **Advanced Code Generation:** Produces clean and efficient code, but may still miss edge cases occasionally. +- **Domain Expertise:** Focused on technical and scientific domains but works well in general contexts too. +- **Reasoning Improvement:** In this version of Atlas, I have enhanced its reasoning via synthetic data from models such as Gemini-2.0 Flash Thinking so that it can improve on reasoning. 
+--- + +# **Evaluation** +Below are the evaluations of the Atlas-Pro models and Deepseek's R1 Qwen Distills (The model that started the whole Atlas family): + +| **Metric** | **Spestly Atlas Pro (7B)** | **Spestly Atlas Pro (1.5B)** | DeepSeek-R1-Distill-Qwen (7B) | DeepSeek-R1-Distill-Qwen (1.5B) | +|-------------------------|---------------------------|------------------------------|-----------------------------------|-------------------------------------| +| **Average** | **22.65%** | 12.93% | 11.73% | 7.53% | +| **IFEval** | 31.54% | 24.30% | **40.38%** | 34.63% | +| **BBH** | **25.27%** | 9.08% | 7.88% | 4.73% | +| **MATH** | **38.90%** | 25.83% | 0.00% | 0.00% | +| **GPQA** | **11.63%** | 6.26% | 3.91% | 2.97% | +| **MUSR** | **6.65%** | 1.86% | 3.55% | 2.08% | +| **MMLU-Pro** | **21.89%** | 10.28% | 14.68% | 0.78% | +| **Carbon Emissions (kg)** | 0.69 kg | **0.59 kg** | 0.68 kg | 0.62 kg | + + + + +--- +### **Intended Use Cases** +Atlas Pro works best for: +- **Technical Professionals:** Helping developers, engineers, and scientists solve complex problems. +- **Educational Assistance:** Offering clear, step-by-step help for students and teachers. +- **Research Support:** Assisting in theoretical and applied science work. +- **Enterprise Tools:** Integrating into company workflows for smarter systems. + +--- + +### **NOTICE** +Atlas Pro is built on **Atlas Flash** and improved to meet high standards. Here’s how it’s made: + +1. **Base Model:** Built upon **Atlas Flash**, which is already quite capable. +2. **Fine-Tuning Details:** + - Used datasets specific to programming, math, and scientific challenges and overall reasoning abilities. + - Refined its performance for professional scenarios. +3. **Performance Highlights:** + - Beats benchmarks with high accuracy, though occasional tweaks might still improve outputs. +--- + +### **Limitations** +- **Knowledge Cutoff:** It doesn’t know about anything recent unless updated. 
+- **Hardware Requirements:** Needs high-end GPUs to run smoothly. +- **Specialization Bias:** While amazing in its focus areas, general chat capabilities might not be as good as other models. +- **Token Leakage:** In some very rare cases (~1/167), Atlas Pro will experience some token leakage. + +--- + +### **Licensing** +Atlas Pro is released under the **MIT License**, which prohibits harmful uses. Make sure to follow the rules in the license agreement. + +--- + +### **Acknowledgments** +Created by **Spestly** as part of the **Atlas Model Family**, Atlas Pro builds on the strong foundation of **Atlas Flash**. Special thanks to **Deepseek's R1 Qwen Distills** for helping make it happen. + +--- + +### **Usage** +You can use Atlas Pro with this code snippet: + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer + +# Load the Atlas Pro model +model_name = "Spestly/Atlas-R1-Pro-1.5B-Preview" +tokenizer = AutoTokenizer.from_pretrained(model_name) +model = AutoModelForCausalLM.from_pretrained(model_name) + +# Generate a response +prompt = "Write a Python function to calculate the Fibonacci sequence." +inputs = tokenizer(prompt, return_tensors="pt") +outputs = model.generate(**inputs, max_length=200) +response = tokenizer.decode(outputs[0], skip_special_tokens=True) + +print(response) +``` \ No newline at end of file