From c70b49eb5b6fc4b2772dd34bc60fe6f9f4d22d51 Mon Sep 17 00:00:00 2001 From: ModelHub XC Date: Mon, 20 Apr 2026 09:03:16 +0800 Subject: [PATCH] =?UTF-8?q?=E5=88=9D=E5=A7=8B=E5=8C=96=E9=A1=B9=E7=9B=AE?= =?UTF-8?q?=EF=BC=8C=E7=94=B1ModelHub=20XC=E7=A4=BE=E5=8C=BA=E6=8F=90?= =?UTF-8?q?=E4=BE=9B=E6=A8=A1=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Model: gyung/lfm2-1.2b-koen-mt-v4-100k-GGUF Source: Original Platform --- .gitattributes | 41 ++++++++++ README.md | 105 ++++++++++++++++++++++++++ lfm2-1.2b-koen-mt-v4-100k-Q4_0.gguf | 3 + lfm2-1.2b-koen-mt-v4-100k-Q4_K_M.gguf | 3 + lfm2-1.2b-koen-mt-v4-100k-Q5_K_M.gguf | 3 + lfm2-1.2b-koen-mt-v4-100k-Q6_K.gguf | 3 + lfm2-1.2b-koen-mt-v4-100k-Q8_0.gguf | 3 + lfm2-1.2b-koen-mt-v4-100k-f16.gguf | 3 + 8 files changed, 164 insertions(+) create mode 100644 .gitattributes create mode 100644 README.md create mode 100644 lfm2-1.2b-koen-mt-v4-100k-Q4_0.gguf create mode 100644 lfm2-1.2b-koen-mt-v4-100k-Q4_K_M.gguf create mode 100644 lfm2-1.2b-koen-mt-v4-100k-Q5_K_M.gguf create mode 100644 lfm2-1.2b-koen-mt-v4-100k-Q6_K.gguf create mode 100644 lfm2-1.2b-koen-mt-v4-100k-Q8_0.gguf create mode 100644 lfm2-1.2b-koen-mt-v4-100k-f16.gguf diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..3c41250 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,41 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text 
+*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +lfm2-1.2b-koen-mt-v4-100k-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text +lfm2-1.2b-koen-mt-v4-100k-f16.gguf filter=lfs diff=lfs merge=lfs -text +lfm2-1.2b-koen-mt-v4-100k-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text +lfm2-1.2b-koen-mt-v4-100k-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text +lfm2-1.2b-koen-mt-v4-100k-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text +lfm2-1.2b-koen-mt-v4-100k-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md new file mode 100644 index 0000000..3be75d2 --- /dev/null +++ b/README.md @@ -0,0 +1,105 @@ +--- +license: other +license_name: lfm-open-license-v1.0 +license_link: https://huggingface.co/LiquidAI/LFM2-1.2B/blob/main/LICENSE +language: +- ko +- en +pipeline_tag: translation +tags: +- lfm2 +- liquid-ai +- korean +- gguf +- quantization +base_model: gyung/lfm2-1.2b-koen-mt-v4-100k +--- + +# 🌊 LFM2-1.2B-KoEn-MT-v4-100k-GGUF + +이 λ¦¬ν¬μ§€ν† λ¦¬λŠ” [gyung/lfm2-1.2b-koen-mt-v4-100k](https://huggingface.co/gyung/lfm2-1.2b-koen-mt-v4-100k) λͺ¨λΈμ˜ **GGUF (GGML/llama.cpp compatible)** 
μ–‘μžν™” 버전을 ν¬ν•¨ν•˜κ³  μžˆμŠ΅λ‹ˆλ‹€. + +## ℹ️ λͺ¨λΈ μ„€λͺ… (Model Description) + +**LFM2-1.2B-KoEn-MT-v4-100k**은 LiquidAI의 `LFM2-1.2B` μ•„ν‚€ν…μ²˜λ₯Ό 기반으둜 ν•œκ΅­μ–΄-μ˜μ–΄ λ²ˆμ—­ μ„±λŠ₯을 κ·ΉλŒ€ν™”ν•˜κΈ° μœ„ν•΄ **100,000개의 κ³ ν’ˆμ§ˆ 병렬 데이터셋**으둜 νŒŒμΈνŠœλ‹λœ λͺ¨λΈμž…λ‹ˆλ‹€. + +* **Base Model**: LiquidAI/LFM2-1.2B +* **Finetuned by**: Gyung +* **Parameters**: 1.2B +* **Purpose**: Korean-English Translation (ν•œκ΅­μ–΄-μ˜μ–΄ λ²ˆμ—­) + +## πŸ“¦ μ œκ³΅λ˜λŠ” GGUF 파일 (Quantization Methods) + +μ‚¬μš© ν™˜κ²½κ³Ό ν•„μš”μ— 따라 μ μ ˆν•œ μ–‘μžν™” 버전을 μ„ νƒν•˜μ—¬ λ‹€μš΄λ‘œλ“œν•˜μ„Έμš”. (ꢌμž₯: `Q4_K_M` λ˜λŠ” `Q5_K_M`) + +| 파일λͺ… (μ˜ˆμ‹œ) | μ–‘μžν™” (Quant) | 크기 (Size) | μ„€λͺ… (Description) | +| --- | --- | --- | --- | +| `lfm2-1.2b-koen-mt-v4-100k-f16.gguf` | F16 | ~2.34 GB | 원본 μ„±λŠ₯ μœ μ§€, μš©λŸ‰ 큼 | +| `lfm2-1.2b-koen-mt-v4-100k-q8_0.gguf` | Q8_0 | ~1.25 GB | ν’ˆμ§ˆ 손싀 거의 μ—†μŒ | +| `lfm2-1.2b-koen-mt-v4-100k-q6_k.gguf` | Q6_K | ~963 MB | 높은 ν’ˆμ§ˆ, κ· ν˜• 작힌 μ„±λŠ₯ | +| `lfm2-1.2b-koen-mt-v4-100k-q5_k_m.gguf` | Q5_K_M | ~843 MB | **μΆ”μ²œ**: ν’ˆμ§ˆκ³Ό 속도/μš©λŸ‰μ˜ 졜적 κ· ν˜• | +| `lfm2-1.2b-koen-mt-v4-100k-q4_k_m.gguf` | Q4_K_M | ~731 MB | **μΆ”μ²œ**: 적은 λ©”λͺ¨λ¦¬ μ†Œλͺ¨, μ€€μˆ˜ν•œ μ„±λŠ₯ | +| `lfm2-1.2b-koen-mt-v4-100k-q4_0.gguf` | Q4_0 | ~696 MB | κ°€μž₯ 가벼움, 일뢀 ν’ˆμ§ˆ μ €ν•˜ κ°€λŠ₯μ„± | + +## πŸš€ μ‚¬μš© 방법 (Usage) + +### llama.cpp + +μ΅œμ‹  λ²„μ „μ˜ `llama.cpp`λ₯Ό μ‚¬μš©ν•˜μ—¬ μ‹€ν–‰ν•  수 μžˆμŠ΅λ‹ˆλ‹€. (LFM μ•„ν‚€ν…μ²˜ 지원 μ—¬λΆ€λ₯Ό ν™•μΈν•˜μ„Έμš”) + +```bash +./llama-cli -m lfm2-1.2b-koen-mt-v4-100k-q5_k_m.gguf \ + -p "Translate to Korean: The model is working correctly now." \ + -n 256 +```` + +### Python (llama-cpp-python) + +```python +from llama_cpp import Llama + +llm = Llama( + model_path="./lfm2-1.2b-koen-mt-v4-100k-q5_k_m.gguf", + n_ctx=2048, + verbose=False +) + +prompt = "Translate to Korean: The model is working correctly now." 
+output = llm( + f"User: {prompt}\nAssistant:", + max_tokens=256, + stop=["User:", "\n"], + echo=True +) + +print(output['choices'][0]['text']) +``` + +## πŸ“Š 벀치마크 (Benchmarks) + +원본 λͺ¨λΈ(F16) κΈ°μ€€ **Flores-200** 평가 κ²°κ³Όμž…λ‹ˆλ‹€. GGUF μ–‘μžν™” μ‹œ μ μˆ˜λŠ” μ†Œν­ ν•˜λ½ν•  수 μžˆμŠ΅λ‹ˆλ‹€. + + * **LFM2-1.2B-KOEN-MT-v4-100k**: chrF++ **30.98** / BLEU **11.09** + * Google Translate: chrF++ 39.27 + * NLLB-200-Distilled-600M: chrF++ 31.97 + +## πŸ“œ λΌμ΄μ„ μŠ€ (License) + +이 λͺ¨λΈμ€ **Liquid AI LFM Open License v1.0**을 λ”°λ¦…λ‹ˆλ‹€. + + * ν•™μˆ /개인 연ꡬ: μ œν•œ μ—†μŒ + * 상업적 이용: μ—° 맀좜 $10M 미만 무료 (초과 μ‹œ 별도 계약 ν•„μš”) + * μžμ„Έν•œ λ‚΄μš©μ€ [LICENSE](https://huggingface.co/LiquidAI/LFM2-1.2B/blob/main/LICENSE)λ₯Ό μ°Έκ³ ν•˜μ„Έμš”. + +## Citation + +```bibtex +@misc{lfm2-1.2b-koen-mt-v4-100k, + author = {Gyung}, + title = {LFM2-1.2B Korean-English Machine Translation Model v4}, + year = {2025}, + publisher = {Hugging Face}, + journal = {Hugging Face Model Hub}, + howpublished = {\url{https://huggingface.co/gyung/lfm2-1.2b-koen-mt-v4-100k}} +} +``` \ No newline at end of file diff --git a/lfm2-1.2b-koen-mt-v4-100k-Q4_0.gguf b/lfm2-1.2b-koen-mt-v4-100k-Q4_0.gguf new file mode 100644 index 0000000..99e0891 --- /dev/null +++ b/lfm2-1.2b-koen-mt-v4-100k-Q4_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33029cca1aeb21db5ae6215815b524b32af50bffa2d025a541daa1c887028063 +size 695751360 diff --git a/lfm2-1.2b-koen-mt-v4-100k-Q4_K_M.gguf b/lfm2-1.2b-koen-mt-v4-100k-Q4_K_M.gguf new file mode 100644 index 0000000..b303df6 --- /dev/null +++ b/lfm2-1.2b-koen-mt-v4-100k-Q4_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2350bb9f69e5b8377f89bcba3d9371f9a28deeacd5e31979aefde97f2fc816b7 +size 730895040 diff --git a/lfm2-1.2b-koen-mt-v4-100k-Q5_K_M.gguf b/lfm2-1.2b-koen-mt-v4-100k-Q5_K_M.gguf new file mode 100644 index 0000000..058436d --- /dev/null +++ 
b/lfm2-1.2b-koen-mt-v4-100k-Q5_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7e7c8ffb2459bd309037858b16522628b6a3caa270fc4d26bdc48e0f6bb1e05 +size 843354816 diff --git a/lfm2-1.2b-koen-mt-v4-100k-Q6_K.gguf b/lfm2-1.2b-koen-mt-v4-100k-Q6_K.gguf new file mode 100644 index 0000000..1e0727a --- /dev/null +++ b/lfm2-1.2b-koen-mt-v4-100k-Q6_K.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3afa8aa989501d7bfad6c43542336f1380a5a03b28739accb5f27fccd83a3cb +size 962843328 diff --git a/lfm2-1.2b-koen-mt-v4-100k-Q8_0.gguf b/lfm2-1.2b-koen-mt-v4-100k-Q8_0.gguf new file mode 100644 index 0000000..2d06e94 --- /dev/null +++ b/lfm2-1.2b-koen-mt-v4-100k-Q8_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b21a1d67e59a0c0b09c29626ec4f90bbb724e0b8df50bbf7bc335fa4ad818aaa +size 1246253760 diff --git a/lfm2-1.2b-koen-mt-v4-100k-f16.gguf b/lfm2-1.2b-koen-mt-v4-100k-f16.gguf new file mode 100644 index 0000000..3fbf5e6 --- /dev/null +++ b/lfm2-1.2b-koen-mt-v4-100k-f16.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5821b5b54bc4b0e89ad9518ae9dcf8f42e63d25168ca175c4134565e965dc641 +size 2343326400