From 89f84eba497d68c15df8bd6afc81e2191f277572 Mon Sep 17 00:00:00 2001 From: ModelHub XC Date: Fri, 10 Apr 2026 12:45:58 +0800 Subject: [PATCH] =?UTF-8?q?=E5=88=9D=E5=A7=8B=E5=8C=96=E9=A1=B9=E7=9B=AE?= =?UTF-8?q?=EF=BC=8C=E7=94=B1ModelHub=20XC=E7=A4=BE=E5=8C=BA=E6=8F=90?= =?UTF-8?q?=E4=BE=9B=E6=A8=A1=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Model: QuantFactory/calme-2.1-phi3.5-4b-GGUF Source: Original Platform --- .gitattributes | 49 ++++++++ README.md | 206 ++++++++++++++++++++++++++++++++ calme-2.1-phi3.5-4b.Q2_K.gguf | 3 + calme-2.1-phi3.5-4b.Q3_K_L.gguf | 3 + calme-2.1-phi3.5-4b.Q3_K_M.gguf | 3 + calme-2.1-phi3.5-4b.Q3_K_S.gguf | 3 + calme-2.1-phi3.5-4b.Q4_0.gguf | 3 + calme-2.1-phi3.5-4b.Q4_1.gguf | 3 + calme-2.1-phi3.5-4b.Q4_K_M.gguf | 3 + calme-2.1-phi3.5-4b.Q4_K_S.gguf | 3 + calme-2.1-phi3.5-4b.Q5_0.gguf | 3 + calme-2.1-phi3.5-4b.Q5_1.gguf | 3 + calme-2.1-phi3.5-4b.Q5_K_M.gguf | 3 + calme-2.1-phi3.5-4b.Q5_K_S.gguf | 3 + calme-2.1-phi3.5-4b.Q6_K.gguf | 3 + calme-2.1-phi3.5-4b.Q8_0.gguf | 3 + configuration.json | 1 + 17 files changed, 298 insertions(+) create mode 100644 .gitattributes create mode 100644 README.md create mode 100644 calme-2.1-phi3.5-4b.Q2_K.gguf create mode 100644 calme-2.1-phi3.5-4b.Q3_K_L.gguf create mode 100644 calme-2.1-phi3.5-4b.Q3_K_M.gguf create mode 100644 calme-2.1-phi3.5-4b.Q3_K_S.gguf create mode 100644 calme-2.1-phi3.5-4b.Q4_0.gguf create mode 100644 calme-2.1-phi3.5-4b.Q4_1.gguf create mode 100644 calme-2.1-phi3.5-4b.Q4_K_M.gguf create mode 100644 calme-2.1-phi3.5-4b.Q4_K_S.gguf create mode 100644 calme-2.1-phi3.5-4b.Q5_0.gguf create mode 100644 calme-2.1-phi3.5-4b.Q5_1.gguf create mode 100644 calme-2.1-phi3.5-4b.Q5_K_M.gguf create mode 100644 calme-2.1-phi3.5-4b.Q5_K_S.gguf create mode 100644 calme-2.1-phi3.5-4b.Q6_K.gguf create mode 100644 calme-2.1-phi3.5-4b.Q8_0.gguf create mode 100644 configuration.json diff --git a/.gitattributes b/.gitattributes new file mode 
100644 index 0000000..69918b0 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,49 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +calme-2.1-phi3.5-4b.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text +calme-2.1-phi3.5-4b.Q4_1.gguf filter=lfs diff=lfs merge=lfs -text +calme-2.1-phi3.5-4b.Q4_0.gguf filter=lfs diff=lfs merge=lfs -text +calme-2.1-phi3.5-4b.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text +calme-2.1-phi3.5-4b.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text 
+calme-2.1-phi3.5-4b.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text +calme-2.1-phi3.5-4b.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text +calme-2.1-phi3.5-4b.Q5_0.gguf filter=lfs diff=lfs merge=lfs -text +calme-2.1-phi3.5-4b.Q5_1.gguf filter=lfs diff=lfs merge=lfs -text +calme-2.1-phi3.5-4b.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text +calme-2.1-phi3.5-4b.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text +calme-2.1-phi3.5-4b.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text +calme-2.1-phi3.5-4b.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text +calme-2.1-phi3.5-4b.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md new file mode 100644 index 0000000..617ed25 --- /dev/null +++ b/README.md @@ -0,0 +1,206 @@ + +--- + +language: +- en +license: mit +library_name: transformers +tags: +- chat +- phi +- phi3 +- phi3.5 +- finetune +base_model: microsoft/Phi-3.5-mini-instruct +datasets: +- MaziyarPanahi/truthy-dpo-v0.1-axolotl +model_name: calme-2.1-phi3.5-4b +pipeline_tag: text-generation +inference: false +model_creator: MaziyarPanahi +quantized_by: MaziyarPanahi +model-index: +- name: calme-2.1-phi3.5-4b + results: + - task: + type: text-generation + name: Text Generation + dataset: + name: IFEval (0-Shot) + type: HuggingFaceH4/ifeval + args: + num_few_shot: 0 + metrics: + - type: inst_level_strict_acc and prompt_level_strict_acc + value: 56.59 + name: strict accuracy + source: + url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.1-phi3.5-4b + name: Open LLM Leaderboard + - task: + type: text-generation + name: Text Generation + dataset: + name: BBH (3-Shot) + type: BBH + args: + num_few_shot: 3 + metrics: + - type: acc_norm + value: 36.11 + name: normalized accuracy + source: + url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.1-phi3.5-4b + name: Open LLM Leaderboard + - task: + type: text-generation + name: Text Generation + dataset: + 
name: MATH Lvl 5 (4-Shot) + type: hendrycks/competition_math + args: + num_few_shot: 4 + metrics: + - type: exact_match + value: 14.43 + name: exact match + source: + url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.1-phi3.5-4b + name: Open LLM Leaderboard + - task: + type: text-generation + name: Text Generation + dataset: + name: GPQA (0-shot) + type: Idavidrein/gpqa + args: + num_few_shot: 0 + metrics: + - type: acc_norm + value: 12.53 + name: acc_norm + source: + url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.1-phi3.5-4b + name: Open LLM Leaderboard + - task: + type: text-generation + name: Text Generation + dataset: + name: MuSR (0-shot) + type: TAUR-Lab/MuSR + args: + num_few_shot: 0 + metrics: + - type: acc_norm + value: 9.77 + name: acc_norm + source: + url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.1-phi3.5-4b + name: Open LLM Leaderboard + - task: + type: text-generation + name: Text Generation + dataset: + name: MMLU-PRO (5-shot) + type: TIGER-Lab/MMLU-Pro + config: main + split: test + args: + num_few_shot: 5 + metrics: + - type: acc + value: 32.61 + name: accuracy + source: + url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.1-phi3.5-4b + name: Open LLM Leaderboard + +--- + +![](https://lh7-rt.googleusercontent.com/docsz/AD_4nXeiuCm7c8lEwEJuRey9kiVZsRn2W-b4pWlu3-X534V3YmVuVc2ZL-NXg2RkzSOOS2JXGHutDuyyNAUtdJI65jGTo8jT9Y99tMi4H4MqL44Uc5QKG77B0d6-JfIkZHFaUA71-RtjyYZWVIhqsNZcx8-OMaA?key=xt3VSDoCbmTY7o-cwwOFwQ) + +# QuantFactory/calme-2.1-phi3.5-4b-GGUF +This is quantized version of [MaziyarPanahi/calme-2.1-phi3.5-4b](https://huggingface.co/MaziyarPanahi/calme-2.1-phi3.5-4b) created using llama.cpp + +# Original Model Card + + +Calme-2 Models + +# MaziyarPanahi/calme-2.1-phi3.5-4b + +This model is a fine-tuned version of the 
`microsoft/Phi-3.5-mini-instruct`, pushing the boundaries of natural language understanding and generation even further. My goal was to create a versatile and robust model that excels across a wide range of benchmarks and real-world applications. + +## Use Cases + +This model is suitable for a wide range of applications, including but not limited to: + +- Advanced question-answering systems +- Intelligent chatbots and virtual assistants +- Content generation and summarization +- Code generation and analysis +- Complex problem-solving and decision support + +# ⚡ Quantized GGUF + +Here are the quants: [calme-2.1-phi3.5-4b-GGUF](https://huggingface.co/MaziyarPanahi/calme-2.1-phi3.5-4b-GGUF) + + +# 🏆 [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) + +Coming soon! + +# Prompt Template + +This model uses the `Phi-3` prompt template: + +``` +<|system|> +You are a helpful assistant.<|end|> +<|user|> +How to explain Internet for a medieval knight?<|end|> +<|assistant|> +``` + +# How to use + + +```python + +# Use a pipeline as a high-level helper + +from transformers import pipeline + +messages = [ + {"role": "user", "content": "Who are you?"}, +] +pipe = pipeline("text-generation", model="MaziyarPanahi/calme-2.1-phi3.5-4b") +pipe(messages) + + +# Load model directly + +from transformers import AutoTokenizer, AutoModelForCausalLM + +tokenizer = AutoTokenizer.from_pretrained("MaziyarPanahi/calme-2.1-phi3.5-4b") +model = AutoModelForCausalLM.from_pretrained("MaziyarPanahi/calme-2.1-phi3.5-4b") +``` + + +# Ethical Considerations + +As with any large language model, users should be aware of potential biases and limitations. We recommend implementing appropriate safeguards and human oversight when deploying this model in production environments. 
+# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) +Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_MaziyarPanahi__calme-2.1-phi3.5-4b) + +| Metric |Value| +|-------------------|----:| +|Avg. |27.01| +|IFEval (0-Shot) |56.59| +|BBH (3-Shot) |36.11| +|MATH Lvl 5 (4-Shot)|14.43| +|GPQA (0-shot) |12.53| +|MuSR (0-shot) | 9.77| +|MMLU-PRO (5-shot) |32.61| + + diff --git a/calme-2.1-phi3.5-4b.Q2_K.gguf b/calme-2.1-phi3.5-4b.Q2_K.gguf new file mode 100644 index 0000000..6a0e4a3 --- /dev/null +++ b/calme-2.1-phi3.5-4b.Q2_K.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f7fbcf42b962f477d3403c5cf5376de6c4ef106268b908243a3097aabd29750 +size 1416204672 diff --git a/calme-2.1-phi3.5-4b.Q3_K_L.gguf b/calme-2.1-phi3.5-4b.Q3_K_L.gguf new file mode 100644 index 0000000..5c9885f --- /dev/null +++ b/calme-2.1-phi3.5-4b.Q3_K_L.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81ef72c4cfa456c39edbabf58a41bad3c4ac84ebb616ff53faa03b63eadb787d +size 2087597952 diff --git a/calme-2.1-phi3.5-4b.Q3_K_M.gguf b/calme-2.1-phi3.5-4b.Q3_K_M.gguf new file mode 100644 index 0000000..bef061f --- /dev/null +++ b/calme-2.1-phi3.5-4b.Q3_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79be43e2c212205591c899ac699d5eacc8d1c96e843357e1e509b7daf21d47bf +size 1955477376 diff --git a/calme-2.1-phi3.5-4b.Q3_K_S.gguf b/calme-2.1-phi3.5-4b.Q3_K_S.gguf new file mode 100644 index 0000000..de102e7 --- /dev/null +++ b/calme-2.1-phi3.5-4b.Q3_K_S.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4374f52edba171b497c78a3cf24674ce7ca40d036ac534f6d502866fa4d69325 +size 1681799040 diff --git a/calme-2.1-phi3.5-4b.Q4_0.gguf b/calme-2.1-phi3.5-4b.Q4_0.gguf new file mode 100644 index 0000000..08ad9f2 --- /dev/null +++ b/calme-2.1-phi3.5-4b.Q4_0.gguf @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:45d9e9d29099476951203de612a3f6272e5bae67204435844b319f7921600a3c +size 2176177536 diff --git a/calme-2.1-phi3.5-4b.Q4_1.gguf b/calme-2.1-phi3.5-4b.Q4_1.gguf new file mode 100644 index 0000000..781de3f --- /dev/null +++ b/calme-2.1-phi3.5-4b.Q4_1.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0e9f28e00581b8d000e3bacea9607f2be776260d03dba74d5ac5b4e9b22da5e +size 2408826240 diff --git a/calme-2.1-phi3.5-4b.Q4_K_M.gguf b/calme-2.1-phi3.5-4b.Q4_K_M.gguf new file mode 100644 index 0000000..410b48f --- /dev/null +++ b/calme-2.1-phi3.5-4b.Q4_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:889d9df03d8e2c09ff1dc76713df41ad8a2b0d158c001b837aa8572329e31e27 +size 2393232768 diff --git a/calme-2.1-phi3.5-4b.Q4_K_S.gguf b/calme-2.1-phi3.5-4b.Q4_K_S.gguf new file mode 100644 index 0000000..768de43 --- /dev/null +++ b/calme-2.1-phi3.5-4b.Q4_K_S.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f93c135d194d3b687ec065bbf07557ceb62acbe7acd9d222ed701dbfaa457b6 +size 2188760448 diff --git a/calme-2.1-phi3.5-4b.Q5_0.gguf b/calme-2.1-phi3.5-4b.Q5_0.gguf new file mode 100644 index 0000000..e9b687d --- /dev/null +++ b/calme-2.1-phi3.5-4b.Q5_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0e90176c016f5c4c1701e4c3b91005fe197b0482e73217d8bcc1fdfdca7ba3c +size 2641474944 diff --git a/calme-2.1-phi3.5-4b.Q5_1.gguf b/calme-2.1-phi3.5-4b.Q5_1.gguf new file mode 100644 index 0000000..968a84b --- /dev/null +++ b/calme-2.1-phi3.5-4b.Q5_1.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91419b16eb82c8bf2299f0edde8f7735fefb2836e0f391fd0f3bbfabe8a10f1e +size 2874123648 diff --git a/calme-2.1-phi3.5-4b.Q5_K_M.gguf b/calme-2.1-phi3.5-4b.Q5_K_M.gguf new file mode 100644 index 0000000..281d0a6 --- /dev/null +++ b/calme-2.1-phi3.5-4b.Q5_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:32d32f9a396063f95e7aacb613285cb359d5bf064cc87c2bb5aa89ca5b055e8d +size 2815276416 diff --git a/calme-2.1-phi3.5-4b.Q5_K_S.gguf b/calme-2.1-phi3.5-4b.Q5_K_S.gguf new file mode 100644 index 0000000..bc7db87 --- /dev/null +++ b/calme-2.1-phi3.5-4b.Q5_K_S.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9c2a06d3a77d3cc589a735e7b7c491f445e28719c56b75d8b9d926eed34cee0 +size 2641474944 diff --git a/calme-2.1-phi3.5-4b.Q6_K.gguf b/calme-2.1-phi3.5-4b.Q6_K.gguf new file mode 100644 index 0000000..6c0bb5a --- /dev/null +++ b/calme-2.1-phi3.5-4b.Q6_K.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdf52c665c09eb7573c570e55b1628eee7a0650b8b5944f0c6b76cbc8ac638ce +size 3135853440 diff --git a/calme-2.1-phi3.5-4b.Q8_0.gguf b/calme-2.1-phi3.5-4b.Q8_0.gguf new file mode 100644 index 0000000..0b3f388 --- /dev/null +++ b/calme-2.1-phi3.5-4b.Q8_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1237de7e2b2591c3ee24689bf6d63522b7173fd08eb67312e15c9ed4d8e757b4 +size 4061222784 diff --git a/configuration.json b/configuration.json new file mode 100644 index 0000000..9e26dfe --- /dev/null +++ b/configuration.json @@ -0,0 +1 @@ +{} \ No newline at end of file