commit 16c12729f0b3b38dc105150136e466afd418e3cd
Author: ModelHub XC
Date:   Fri Apr 10 12:42:58 2026 +0800

    Initialize the project; model provided by the ModelHub XC community

    Model: unsloth/LFM2-350M-GGUF
    Source: Original Platform

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..d00eeb1
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,45 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+LFM2-350M-UD-Q5_K_XL.gguf filter=lfs diff=lfs merge=lfs -text
+LFM2-350M-UD-Q3_K_XL.gguf filter=lfs diff=lfs merge=lfs -text
+LFM2-350M-UD-Q2_K_XL.gguf filter=lfs diff=lfs merge=lfs -text
+LFM2-350M-Q2_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+LFM2-350M-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+LFM2-350M-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+LFM2-350M-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+LFM2-350M-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+LFM2-350M-Q4_1.gguf filter=lfs diff=lfs merge=lfs -text
+LFM2-350M-F16.gguf filter=lfs diff=lfs merge=lfs -text

diff --git a/LFM2-350M-F16.gguf b/LFM2-350M-F16.gguf
new file mode 100644
index 0000000..532d555
--- /dev/null
+++ b/LFM2-350M-F16.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eab26c871ba936f60f6990f5ed704e04e75cdc2bb959338241b8c60353a8da16
+size 711482080

diff --git a/LFM2-350M-Q2_K.gguf b/LFM2-350M-Q2_K.gguf
new file mode 100644
index 0000000..5d0c140
--- /dev/null
+++ b/LFM2-350M-Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f56f1c89b9372a67f99e910c60e633015245a5681125c3aa6c7411a7730a5ed3
+size 160594656

diff --git a/LFM2-350M-Q2_K_L.gguf b/LFM2-350M-Q2_K_L.gguf
new file mode 100644
index 0000000..5d0c140
--- /dev/null
+++ b/LFM2-350M-Q2_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f56f1c89b9372a67f99e910c60e633015245a5681125c3aa6c7411a7730a5ed3
+size 160594656

diff --git a/LFM2-350M-Q3_K_M.gguf b/LFM2-350M-Q3_K_M.gguf
new file mode 100644
index 0000000..c3bf19c
--- /dev/null
+++ b/LFM2-350M-Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f596419ea04276ad85c92c21cdfe962312c3ff6b66b565d17ac7685e7ff5379
+size 193149664

diff --git a/LFM2-350M-Q3_K_S.gguf b/LFM2-350M-Q3_K_S.gguf
new file mode 100644
index 0000000..4676a52
--- /dev/null
+++ b/LFM2-350M-Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64abc7608c1dfcf8b5b47f51a5532b24c98042d0ce6923d60866a021ee848e5b
+size 181148384

diff --git a/LFM2-350M-Q4_0.gguf b/LFM2-350M-Q4_0.gguf
new file mode 100644
index 0000000..f2b5e83
--- /dev/null
+++ b/LFM2-350M-Q4_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48c85964c0b306b86bbca5bb6361cbd7608f1810d62ab3af0df849a98bd87f56
+size 219306720

diff --git a/LFM2-350M-Q4_1.gguf b/LFM2-350M-Q4_1.gguf
new file mode 100644
index 0000000..45fd4fd
--- /dev/null
+++ b/LFM2-350M-Q4_1.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eecb148fc73cb13c13a500e47db839379b3bbae17fd1c3babe5511132b1aaa77
+size 237263584

diff --git a/LFM2-350M-Q4_K_M.gguf b/LFM2-350M-Q4_K_M.gguf
new file mode 100644
index 0000000..67d7c8e
--- /dev/null
+++ b/LFM2-350M-Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15ded463c01b6b6f6fe6a5f8ea6b87902aef9f7191bcc9c110c5591fe2f69282
+size 229309152

diff --git a/LFM2-350M-Q4_K_S.gguf b/LFM2-350M-Q4_K_S.gguf
new file mode 100644
index 0000000..9e82ba6
--- /dev/null
+++ b/LFM2-350M-Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92714679897aba32c485e48b417f6f10ac124007bd6d9986d580aa8017819c1b
+size 220748512

diff --git a/LFM2-350M-Q5_K_M.gguf b/LFM2-350M-Q5_K_M.gguf
new file mode 100644
index 0000000..3ff1118
--- /dev/null
+++ b/LFM2-350M-Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:266ccacb1386665eb9fa3a86f061f67ff708cdca759886a9b02567a506d55a78
+size 260373216

diff --git a/LFM2-350M-Q5_K_S.gguf b/LFM2-350M-Q5_K_S.gguf
new file mode 100644
index 0000000..270541e
--- /dev/null
+++ b/LFM2-350M-Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:115d59ce49f666ba596948bd32af5b74345a4d5bd85869dede25b6874bcbf392
+size 255220448

diff --git a/LFM2-350M-Q6_K.gguf b/LFM2-350M-Q6_K.gguf
new file mode 100644
index 0000000..2333eb3
--- /dev/null
+++ b/LFM2-350M-Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5975fc787fcb515da73b930a490dee1ccf4ffffd88c89badfc5cca9119c5d0a5
+size 293378784

diff --git a/LFM2-350M-Q8_0.gguf b/LFM2-350M-Q8_0.gguf
new file mode 100644
index 0000000..5cd45a8
--- /dev/null
+++ b/LFM2-350M-Q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd29222147b1f62b4bb739f2fc575f0883ea9a5c05f5ac49b28d101ee9afbab7
+size 379214560

diff --git a/LFM2-350M-UD-Q2_K_XL.gguf b/LFM2-350M-UD-Q2_K_XL.gguf
new file mode 100644
index 0000000..5d0c140
--- /dev/null
+++ b/LFM2-350M-UD-Q2_K_XL.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f56f1c89b9372a67f99e910c60e633015245a5681125c3aa6c7411a7730a5ed3
+size 160594656

diff --git a/LFM2-350M-UD-Q3_K_XL.gguf b/LFM2-350M-UD-Q3_K_XL.gguf
new file mode 100644
index 0000000..c3bf19c
--- /dev/null
+++ b/LFM2-350M-UD-Q3_K_XL.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f596419ea04276ad85c92c21cdfe962312c3ff6b66b565d17ac7685e7ff5379
+size 193149664

diff --git a/LFM2-350M-UD-Q4_K_XL.gguf b/LFM2-350M-UD-Q4_K_XL.gguf
new file mode 100644
index 0000000..67d7c8e
--- /dev/null
+++ b/LFM2-350M-UD-Q4_K_XL.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15ded463c01b6b6f6fe6a5f8ea6b87902aef9f7191bcc9c110c5591fe2f69282
+size 229309152

diff --git a/LFM2-350M-UD-Q5_K_XL.gguf b/LFM2-350M-UD-Q5_K_XL.gguf
new file mode 100644
index 0000000..3ff1118
--- /dev/null
+++ b/LFM2-350M-UD-Q5_K_XL.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:266ccacb1386665eb9fa3a86f061f67ff708cdca759886a9b02567a506d55a78
+size 260373216

diff --git a/LFM2-350M-UD-Q6_K_XL.gguf b/LFM2-350M-UD-Q6_K_XL.gguf
new file mode 100644
index 0000000..fd2a104
--- /dev/null
+++ b/LFM2-350M-UD-Q6_K_XL.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1d7bcc5098bf5b0d85a35b1590cd6fc7c0b83987420fe84474fc75b2b31900a
+size 309631712

diff --git a/LFM2-350M-UD-Q8_K_XL.gguf b/LFM2-350M-UD-Q8_K_XL.gguf
new file mode 100644
index 0000000..2d17795
--- /dev/null
+++ b/LFM2-350M-UD-Q8_K_XL.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a010e4cf8529da19b29947975b71901d0404a1626a7402702964979696dfc313
+size 442129120

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..a61683f
--- /dev/null
+++ b/README.md
@@ -0,0 +1,256 @@
---
base_model:
- LiquidAI/LFM2-350M
library_name: transformers
license: other
license_name: lfm1.0
license_link: LICENSE
language:
- en
- ar
- zh
- fr
- de
- ja
- ko
- es
pipeline_tag: text-generation
tags:
- liquid
- unsloth
- lfm2
- edge
---

> [!NOTE]
> Includes our **chat template fixes**!
> For `llama.cpp`, use `--jinja`
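With the llama.cpp CLI, `--jinja` applies the chat template embedded in the GGUF. As a minimal sketch of the same workflow from Python (assuming the [`llama-cpp-python`](https://github.com/abetlen/llama-cpp-python) bindings and a locally downloaded quant; the file name below is illustrative):

```python
from llama_cpp import Llama  # pip install llama-cpp-python

# Load a quant from this repo; the bindings pick up the chat template
# stored in the GGUF metadata when one is present.
llm = Llama(model_path="LFM2-350M-Q4_K_M.gguf", n_ctx=4096)

out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "What is C. elegans?"}],
    temperature=0.3,        # recommended generation parameters (see below)
    min_p=0.15,
    repeat_penalty=1.05,
    max_tokens=256,
)
print(out["choices"][0]["message"]["content"])
```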
<div align="center">
<em>Unsloth Dynamic 2.0 achieves superior accuracy & outperforms other leading quants.</em>
</div>

<!-- Liquid AI banner: logo plus "Liquid: Playground" and related badge links -->
# LFM2-350M

LFM2 is a new generation of hybrid models developed by [Liquid AI](https://www.liquid.ai/), specifically designed for edge AI and on-device deployment. It sets a new standard in terms of quality, speed, and memory efficiency.

We're releasing the weights of three post-trained checkpoints with 350M, 700M, and 1.2B parameters. They provide the following key features to create AI-powered edge applications:

* **Fast training & inference** – LFM2 achieves 3x faster training compared to its previous generation. It also benefits from 2x faster decode and prefill speed on CPU compared to Qwen3.
* **Best performance** – LFM2 outperforms similarly-sized models across multiple benchmark categories, including knowledge, mathematics, instruction following, and multilingual capabilities.
* **New architecture** – LFM2 is a new hybrid Liquid model with multiplicative gates and short convolutions.
* **Flexible deployment** – LFM2 runs efficiently on CPU, GPU, and NPU hardware for flexible deployment on smartphones, laptops, or vehicles.

Find more information about LFM2 in our [blog post](https://www.liquid.ai/blog/liquid-foundation-models-v2-our-second-series-of-generative-ai-models).

## 📄 Model details

Due to their small size, **we recommend fine-tuning LFM2 models on narrow use cases** to maximize performance.
They are particularly suited for agentic tasks, data extraction, RAG, creative writing, and multi-turn conversations.
However, we do not recommend using them for tasks that are knowledge-intensive or require programming skills.

| Property            | Value                 |
| ------------------- | --------------------- |
| **Parameters**      | 354,483,968           |
| **Layers**          | 16 (10 conv + 6 attn) |
| **Context length**  | 32,768 tokens         |
| **Vocabulary size** | 65,536                |
| **Precision**       | bfloat16              |
| **Training budget** | 10 trillion tokens    |
| **License**         | LFM Open License v1.0 |

**Supported languages**: English, Arabic, Chinese, French, German, Japanese, Korean, and Spanish.

**Generation parameters**: We recommend the following parameters:
* `temperature=0.3`
* `min_p=0.15`
* `repetition_penalty=1.05`

**Chat template**: LFM2 uses a ChatML-like chat template as follows:

```
<|startoftext|><|im_start|>system
You are a helpful assistant trained by Liquid AI.<|im_end|>
<|im_start|>user
What is C. elegans?<|im_end|>
<|im_start|>assistant
It's a tiny nematode that lives in temperate soil environments.<|im_end|>
```

You can apply it using the dedicated [`.apply_chat_template()`](https://huggingface.co/docs/transformers/en/chat_templating#applychattemplate) function from Hugging Face transformers.

**Tool use**: Tool use consists of four main steps:
1. **Function definition**: LFM2 takes JSON function definitions as input (JSON objects between `<|tool_list_start|>` and `<|tool_list_end|>` special tokens), usually in the system prompt.
2. **Function call**: LFM2 writes Pythonic function calls (a Python list between `<|tool_call_start|>` and `<|tool_call_end|>` special tokens) as the assistant answer.
3. **Function execution**: The function call is executed and the result is returned (a string between `<|tool_response_start|>` and `<|tool_response_end|>` special tokens) as a "tool" role.
4. **Final answer**: LFM2 interprets the outcome of the function call to address the original user prompt in plain text.
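To make steps 2 and 3 concrete, here is a minimal, self-contained sketch of extracting and executing a Pythonic tool call from the text between the special tokens (the `get_candidate_status` stub mirrors the conversation example below and is purely illustrative):

```python
import ast
import re

# Illustrative stub matching the example conversation below.
def get_candidate_status(candidate_id: str) -> dict:
    return {"candidate_id": candidate_id, "status": "Interview Scheduled"}

TOOLS = {"get_candidate_status": get_candidate_status}

# Sample assistant turn containing a tool call between the special tokens.
assistant_text = '<|tool_call_start|>[get_candidate_status(candidate_id="12345")]<|tool_call_end|>'

m = re.search(r"<\|tool_call_start\|>(.*?)<\|tool_call_end\|>", assistant_text, re.DOTALL)
if m:
    # Parse the Pythonic call list via the AST instead of eval().
    for call in ast.parse(m.group(1).strip(), mode="eval").body.elts:
        kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in call.keywords}
        result = TOOLS[call.func.id](**kwargs)
        # Step 3: return `result` to the model as a "tool" role message,
        # wrapped in <|tool_response_start|> ... <|tool_response_end|>.
        print(result)
```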
Here is a simple example of a conversation using tool use:

```
<|startoftext|><|im_start|>system
List of tools: <|tool_list_start|>[{"name": "get_candidate_status", "description": "Retrieves the current status of a candidate in the recruitment process", "parameters": {"type": "object", "properties": {"candidate_id": {"type": "string", "description": "Unique identifier for the candidate"}}, "required": ["candidate_id"]}}]<|tool_list_end|><|im_end|>
<|im_start|>user
What is the current status of candidate ID 12345?<|im_end|>
<|im_start|>assistant
<|tool_call_start|>[get_candidate_status(candidate_id="12345")]<|tool_call_end|>Checking the current status of candidate ID 12345.<|im_end|>
<|im_start|>tool
<|tool_response_start|>{"candidate_id": "12345", "status": "Interview Scheduled", "position": "Clinical Research Associate", "date": "2023-11-20"}<|tool_response_end|><|im_end|>
<|im_start|>assistant
The candidate with ID 12345 is currently in the "Interview Scheduled" stage for the position of Clinical Research Associate, with an interview date set for 2023-11-20.<|im_end|>
```

**Architecture**: Hybrid model with multiplicative gates and short convolutions: 10 double-gated short-range LIV convolution blocks and 6 grouped query attention (GQA) blocks.

**Pre-training mixture**: Approximately 75% English, 20% multilingual, and 5% code data sourced from the web and licensed materials.

**Training approach**:
* Knowledge distillation using [LFM1-7B](https://www.liquid.ai/blog/introducing-lfm-7b-setting-new-standards-for-efficient-language-models) as teacher model
* Very large-scale SFT on 50% downstream tasks, 50% general domains
* Custom DPO with length normalization and semi-online datasets
* Iterative model merging

## 🏃 How to run LFM2

To run LFM2, you need to install Hugging Face [`transformers`](https://github.com/huggingface/transformers) from source (v4.54.0.dev0).
You can update or install it with the following command: `pip install "transformers @ git+https://github.com/huggingface/transformers.git@main"`.

Here is an example of how to generate an answer with transformers in Python:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load model and tokenizer
model_id = "LiquidAI/LFM2-350M"
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype="bfloat16",
    trust_remote_code=True,
#    attn_implementation="flash_attention_2" # <- uncomment on compatible GPU
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Generate answer
prompt = "What is C. elegans?"
input_ids = tokenizer.apply_chat_template(
    [{"role": "user", "content": prompt}],
    add_generation_prompt=True,
    return_tensors="pt",
    tokenize=True,
).to(model.device)

output = model.generate(
    input_ids,
    do_sample=True,
    temperature=0.3,
    min_p=0.15,
    repetition_penalty=1.05,
    max_new_tokens=512,
)

print(tokenizer.decode(output[0], skip_special_tokens=False))

# <|startoftext|><|im_start|>user
# What is C. elegans?<|im_end|>
# <|im_start|>assistant
# C. elegans, also known as Caenorhabditis elegans, is a small, free-living
# nematode worm (roundworm) that belongs to the phylum Nematoda.
```

You can directly run and test the model with this [Colab notebook](https://colab.research.google.com/drive/1_q3jQ6LtyiuPzFZv7Vw8xSfPU5FwkKZY?usp=sharing).

## 🔧 How to fine-tune LFM2

We recommend fine-tuning LFM2 models on your use cases to maximize performance.
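For a rough picture of what the SFT + LoRA notebook below does, here is a minimal sketch with TRL and PEFT (a sketch assuming recent `trl`, `peft`, and `datasets` releases; the dataset and hyperparameters are placeholders rather than the notebooks' actual settings):

```python
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTConfig, SFTTrainer

# Placeholder chat-style dataset; substitute data from your narrow use case.
dataset = load_dataset("trl-lib/Capybara", split="train")

trainer = SFTTrainer(
    model="LiquidAI/LFM2-350M",  # loaded from the Hub by TRL
    train_dataset=dataset,
    args=SFTConfig(output_dir="lfm2-350m-sft", per_device_train_batch_size=2),
    peft_config=LoraConfig(r=16, lora_alpha=32, target_modules="all-linear"),
)
trainer.train()
```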
| Notebook | Description | Link |
|----------|-------------|------|
| SFT + LoRA | Supervised Fine-Tuning (SFT) notebook with a LoRA adapter in TRL. | Colab link |
| DPO | Preference alignment with Direct Preference Optimization (DPO) in TRL. | Colab link |

## 📈 Performance

LFM2 outperforms similar-sized models across different evaluation categories.

### 1. Automated benchmarks

![image/png](https://cdn-uploads.huggingface.co/production/uploads/61b8e2ba285851687028d395/3cB7VqMnrG9I8EqrL7k-q.png)

| Model | MMLU | GPQA | IFEval | IFBench | GSM8K | MGSM | MMMLU |
|-------|------|------|--------|---------|-------|------|-------|
| LFM2-350M | 43.43 | 27.46 | 65.12 | 16.41 | 30.1 | 29.52 | 37.99 |
| LFM2-700M | 49.9 | 28.48 | 72.23 | 20.56 | 46.4 | 45.36 | 43.28 |
| LFM2-1.2B | *55.23* | **31.47** | **74.89** | *20.7* | *58.3* | *55.04* | **46.73** |
| Qwen3-0.6B | 44.93 | 22.14 | 64.24 | 19.75 | 36.47 | 41.28 | 30.84 |
| Qwen3-1.7B | **59.11** | 27.72 | *73.98* | **21.27** | 51.4 | **66.56** | *46.51* |
| Llama-3.2-1B-Instruct | 46.6 | *28.84* | 52.39 | 16.86 | 35.71 | 29.12 | 38.15 |
| gemma-3-1b-it | 40.08 | 21.07 | 62.9 | 17.72 | **59.59** | 43.6 | 34.43 |

### 2. LLM-as-a-Judge

![image/png](https://cdn-uploads.huggingface.co/production/uploads/61b8e2ba285851687028d395/4Yxx0l9aQ6ATrps5GWHzv.png)
![image/png](https://cdn-uploads.huggingface.co/production/uploads/61b8e2ba285851687028d395/lzpZOGwH-8bTlOWd3tv6M.png)

### 3. Inference

#### Throughput comparison on CPU in ExecuTorch

![image/png](https://cdn-uploads.huggingface.co/production/uploads/61b8e2ba285851687028d395/KoKcsXUOnkvz2dwZ99k08.png)

#### Throughput comparison on CPU in Llama.cpp

![image/png](https://cdn-uploads.huggingface.co/production/uploads/61b8e2ba285851687028d395/c7UYZ5nh6qJMB4rd6WKde.png)

## 📬 Contact

If you are interested in custom solutions with edge deployment, please contact [our sales team](https://www.liquid.ai/contact).