From 930261a3dd76272b42bd52789c0f840a18313409 Mon Sep 17 00:00:00 2001 From: ModelHub XC Date: Tue, 21 Apr 2026 20:33:59 +0800 Subject: [PATCH] =?UTF-8?q?=E5=88=9D=E5=A7=8B=E5=8C=96=E9=A1=B9=E7=9B=AE?= =?UTF-8?q?=EF=BC=8C=E7=94=B1ModelHub=20XC=E7=A4=BE=E5=8C=BA=E6=8F=90?= =?UTF-8?q?=E4=BE=9B=E6=A8=A1=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Model: Mungert/HyperCLOVAX-SEED-Text-Instruct-0.5B-GGUF Source: Original Platform --- .gitattributes | 70 +++++ HyperCLOVAX-SEED-Text-Instruct-0.5B-bf16.gguf | 3 + ...VAX-SEED-Text-Instruct-0.5B-bf16_q8_0.gguf | 3 + ...OVAX-SEED-Text-Instruct-0.5B-f16_q8_0.gguf | 3 + ...rCLOVAX-SEED-Text-Instruct-0.5B-iq3_m.gguf | 3 + ...rCLOVAX-SEED-Text-Instruct-0.5B-iq3_s.gguf | 3 + ...CLOVAX-SEED-Text-Instruct-0.5B-iq3_xs.gguf | 3 + ...LOVAX-SEED-Text-Instruct-0.5B-iq3_xxs.gguf | 3 + ...CLOVAX-SEED-Text-Instruct-0.5B-iq4_nl.gguf | 3 + ...CLOVAX-SEED-Text-Instruct-0.5B-iq4_xs.gguf | 3 + ...CLOVAX-SEED-Text-Instruct-0.5B-q3_k_m.gguf | 3 + ...CLOVAX-SEED-Text-Instruct-0.5B-q3_k_s.gguf | 3 + HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_0.gguf | 3 + HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_1.gguf | 3 + ...CLOVAX-SEED-Text-Instruct-0.5B-q4_k_m.gguf | 3 + ...CLOVAX-SEED-Text-Instruct-0.5B-q4_k_s.gguf | 3 + HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_0.gguf | 3 + HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_1.gguf | 3 + ...CLOVAX-SEED-Text-Instruct-0.5B-q5_k_m.gguf | 3 + ...CLOVAX-SEED-Text-Instruct-0.5B-q5_k_s.gguf | 3 + ...CLOVAX-SEED-Text-Instruct-0.5B-q6_k_m.gguf | 3 + HyperCLOVAX-SEED-Text-Instruct-0.5B-q8_0.gguf | 3 + HyperCLOVAX-SEED-Text-Instruct-0.5B.imatrix | 3 + README.md | 280 ++++++++++++++++++ 24 files changed, 416 insertions(+) create mode 100644 .gitattributes create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-bf16.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-bf16_q8_0.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-f16_q8_0.gguf create mode 100644 
HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_m.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_s.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_xs.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_xxs.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-iq4_nl.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-iq4_xs.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-q3_k_m.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-q3_k_s.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_0.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_1.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_k_m.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_k_s.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_0.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_1.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_k_m.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_k_s.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-q6_k_m.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B-q8_0.gguf create mode 100644 HyperCLOVAX-SEED-Text-Instruct-0.5B.imatrix create mode 100644 README.md diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..a2b5f23 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,70 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy 
filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-f16.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-f16_q8_0.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-bf16_q8_0.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-f16_q6_k.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-bf16_q6_k.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-f16_q4_k.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-bf16_q4_k.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q3_k_l.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_k_l.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_k_l.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q6_k_l.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q3_k_m.gguf filter=lfs diff=lfs merge=lfs 
-text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q3_k_s.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_k_m.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_k_s.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_k_m.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_k_s.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q6_k_m.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q8_0.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_0.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_1.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_0_l.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_1_l.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_0.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_1.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_0_l.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_1_l.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_xs.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_xxs.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_s.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_m.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-iq4_xs.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-iq4_nl.gguf filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B.imatrix filter=lfs diff=lfs merge=lfs -text +HyperCLOVAX-SEED-Text-Instruct-0.5B-bf16.gguf filter=lfs diff=lfs merge=lfs -text diff --git 
a/HyperCLOVAX-SEED-Text-Instruct-0.5B-bf16.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-bf16.gguf new file mode 100644 index 0000000..897a481 --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-bf16.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:013a5ab31ad010b74f3a0d0e32221c3cd13e96773ae18a89f10d4dc31433a4f0 +size 1136722944 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-bf16_q8_0.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-bf16_q8_0.gguf new file mode 100644 index 0000000..5d28c5b --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-bf16_q8_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdc57c2c9276c9cb9d4b693af9469157d1a9d57a9ca57e95ecdddefc036c2097 +size 712049664 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-f16_q8_0.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-f16_q8_0.gguf new file mode 100644 index 0000000..51d9200 --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-f16_q8_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5205d5513aaacc68f65f53fb1531ee9e202ced3592f9aa6fc2c23be47b5ddb8c +size 712049664 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_m.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_m.gguf new file mode 100644 index 0000000..2e326f5 --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_m.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c60055572fb3137ca96023d2c9dbb38daa0cf394b411a58955a8c3280b02ecc +size 284280160 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_s.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_s.gguf new file mode 100644 index 0000000..5c112ae --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_s.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84010df18f39e53a62b8eaeaa0c1bdd11866b9acdc2c1b14dd1cca9eb4f6fcb4 +size 276759904 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_xs.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_xs.gguf new file mode 
100644 index 0000000..a09c027 --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_xs.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:539698937f21c681d48c21601fe6622a10083c669932e80c390c876cebf79493 +size 266143072 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_xxs.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_xxs.gguf new file mode 100644 index 0000000..6a9765f --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_xxs.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d4b46fb2c6feef07e59d570d64fddcd76f8f9793c317e38963b138fc01d03af +size 254641504 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq4_nl.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq4_nl.gguf new file mode 100644 index 0000000..e192250 --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq4_nl.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6c0c0222bfc9399e682110e92e80f35a1f545db8a6ea4c07a3ab8ef6dea22b6 +size 351962464 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq4_xs.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq4_xs.gguf new file mode 100644 index 0000000..6b71565 --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-iq4_xs.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34f2ab94a03ba537a5b9260f6f469b1d8777ee4995bf1263a0cb55cc27d910f0 +size 337806688 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-q3_k_m.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q3_k_m.gguf new file mode 100644 index 0000000..a1a8891 --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q3_k_m.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7ccbc891ade8ccb777c93991f9a97221ad1ad9a9251e159b59c0796f7bfe555 +size 313820512 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-q3_k_s.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q3_k_s.gguf new file mode 100644 index 0000000..b04b683 --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q3_k_s.gguf @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:48cc0c00b794d52a00eac3d5b163e93af0db0db2db8748b8477357a145a555d8 +size 281232736 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_0.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_0.gguf new file mode 100644 index 0000000..95635a7 --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e3820e057c78ee62f1fbe9a91f79a3cddfec593d4a3a93c25087d85363b4b7b +size 322766176 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_1.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_1.gguf new file mode 100644 index 0000000..0981e3a --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_1.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7c36e3d16c1a52a4fe98873d895659504323599122abe20eb20c92bea6da8a5 +size 358155616 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_k_m.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_k_m.gguf new file mode 100644 index 0000000..c2da081 --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_k_m.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:314880238197184dde5a637eb9a4e383e9e496ea0e4ddc1bfd74928f9cccdcb6 +size 365577568 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_k_s.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_k_s.gguf new file mode 100644 index 0000000..8f1765c --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_k_s.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35288d0571fc53a5a2c3be4fb3ee09cf7de32d5d32e4c3f557fc93b76e49810a +size 357598560 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_0.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_0.gguf new file mode 100644 index 0000000..4a78bd6 --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06767a5fe1b0c59e06aef073224ae4e50370447515ecd782ed38890a171ddb1c +size 393545056 
diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_1.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_1.gguf new file mode 100644 index 0000000..40368d6 --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_1.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88279f53002d8897c0b75f0cac4a5ef5d90afc4168a241faf1550aff22bf61cc +size 428934496 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_k_m.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_k_m.gguf new file mode 100644 index 0000000..58308c1 --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_k_m.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31ef42f2f58b27ea90697b5be65e48298c28a88800c33f582ead1ea079c423ec +size 418612576 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_k_s.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_k_s.gguf new file mode 100644 index 0000000..19e1c01 --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q5_k_s.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:668462ad54e52e11857da30fb10f231321d15477a271a6a6632a3ae4ad82a813 +size 414434656 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-q6_k_m.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q6_k_m.gguf new file mode 100644 index 0000000..d5f53fb --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q6_k_m.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:836258aa50a6946c63cb154c84c6b31c963c59ab1024bc62d4b1d030a7312e9a +size 468747616 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B-q8_0.gguf b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q8_0.gguf new file mode 100644 index 0000000..6ff521b --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B-q8_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ce1341ac0a9aa78c6331748876ba17246d167477976ad0b8c570c25bccecfa6 +size 605881344 diff --git a/HyperCLOVAX-SEED-Text-Instruct-0.5B.imatrix b/HyperCLOVAX-SEED-Text-Instruct-0.5B.imatrix new file mode 100644 index 
0000000..f47df34 --- /dev/null +++ b/HyperCLOVAX-SEED-Text-Instruct-0.5B.imatrix @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ed0679206462d97f1dc7c0450e26c4a73b5d0f7502be3ea3eaa408233134773 +size 1086948 diff --git a/README.md b/README.md new file mode 100644 index 0000000..2761254 --- /dev/null +++ b/README.md @@ -0,0 +1,280 @@ +--- +license: other +license_name: hyperclovax-seed +license_link: LICENSE +pipeline_tag: text-generation +library_name: transformers +--- + +# HyperCLOVAX-SEED-Text-Instruct-0.5B GGUF Models + + +## Model Generation Details + +This model was generated using [llama.cpp](https://github.com/ggerganov/llama.cpp) at commit [`5e7d95e2`](https://github.com/ggerganov/llama.cpp/commit/5e7d95e22e386d316f7f659b74c9c34b65507912). + + + + + +## **Choosing the Right Model Format** + +Selecting the correct model format depends on your **hardware capabilities** and **memory constraints**. + +### **BF16 (Brain Float 16) โ€“ Use if BF16 acceleration is available** +- A 16-bit floating-point format designed for **faster computation** while retaining good precision. +- Provides **similar dynamic range** as FP32 but with **lower memory usage**. +- Recommended if your hardware supports **BF16 acceleration** (check your device's specs). +- Ideal for **high-performance inference** with **reduced memory footprint** compared to FP32. + +๐Ÿ“Œ **Use BF16 if:** +โœ” Your hardware has native **BF16 support** (e.g., newer GPUs, TPUs). +โœ” You want **higher precision** while saving memory. +โœ” You plan to **requantize** the model into another format. + +๐Ÿ“Œ **Avoid BF16 if:** +โŒ Your hardware does **not** support BF16 (it may fall back to FP32 and run slower). +โŒ You need compatibility with older devices that lack BF16 optimization. + +--- + +### **F16 (Float 16) โ€“ More widely supported than BF16** +- A 16-bit floating-point **high precision** but with less of range of values than BF16. 
+- Works on most devices with **FP16 acceleration support** (including many GPUs and some CPUs). +- Slightly lower numerical precision than BF16 but generally sufficient for inference. + +๐Ÿ“Œ **Use F16 if:** +โœ” Your hardware supports **FP16** but **not BF16**. +โœ” You need a **balance between speed, memory usage, and accuracy**. +โœ” You are running on a **GPU** or another device optimized for FP16 computations. + +๐Ÿ“Œ **Avoid F16 if:** +โŒ Your device lacks **native FP16 support** (it may run slower than expected). +โŒ You have memory limitations. + +--- + +### **Quantized Models (Q4_K, Q6_K, Q8, etc.) โ€“ For CPU & Low-VRAM Inference** +Quantization reduces model size and memory usage while maintaining as much accuracy as possible. +- **Lower-bit models (Q4_K)** โ†’ **Best for minimal memory usage**, may have lower precision. +- **Higher-bit models (Q6_K, Q8_0)** โ†’ **Better accuracy**, requires more memory. + +๐Ÿ“Œ **Use Quantized Models if:** +โœ” You are running inference on a **CPU** and need an optimized model. +โœ” Your device has **low VRAM** and cannot load full-precision models. +โœ” You want to reduce **memory footprint** while keeping reasonable accuracy. + +๐Ÿ“Œ **Avoid Quantized Models if:** +โŒ You need **maximum accuracy** (full-precision models are better for this). +โŒ Your hardware has enough VRAM for higher-precision formats (BF16/F16). + +--- + +### **Very Low-Bit Quantization (IQ3_XS, IQ3_S, IQ3_M, Q4_K, Q4_0)** +These models are optimized for **extreme memory efficiency**, making them ideal for **low-power devices** or **large-scale deployments** where memory is a critical constraint. + +- **IQ3_XS**: Ultra-low-bit quantization (3-bit) with **extreme memory efficiency**. + - **Use case**: Best for **ultra-low-memory devices** where even Q4_K is too large. + - **Trade-off**: Lower accuracy compared to higher-bit quantizations. + +- **IQ3_S**: Small block size for **maximum memory efficiency**. 
+ - **Use case**: Best for **low-memory devices** where **IQ3_XS** is too aggressive. + +- **IQ3_M**: Medium block size for better accuracy than **IQ3_S**. + - **Use case**: Suitable for **low-memory devices** where **IQ3_S** is too limiting. + +- **Q4_K**: 4-bit quantization with **block-wise optimization** for better accuracy. + - **Use case**: Best for **low-memory devices** where **Q6_K** is too large. + +- **Q4_0**: Pure 4-bit quantization, optimized for **ARM devices**. + - **Use case**: Best for **ARM-based devices** or **low-memory environments**. + +--- + +### **Summary Table: Model Format Selection** + +| Model Format | Precision | Memory Usage | Device Requirements | Best Use Case | +|--------------|------------|---------------|----------------------|---------------| +| **BF16** | Highest | High | BF16-supported GPU/CPUs | High-speed inference with reduced memory | +| **F16** | High | High | FP16-supported devices | GPU inference when BF16 isn't available | +| **Q4_K** | Medium Low | Low | CPU or Low-VRAM devices | Best for memory-constrained environments | +| **Q6_K** | Medium | Moderate | CPU with more memory | Better accuracy while still being quantized | +| **Q8_0** | High | Moderate | CPU or GPU with enough VRAM | Best accuracy among quantized models | +| **IQ3_XS** | Very Low | Very Low | Ultra-low-memory devices | Extreme memory efficiency and low accuracy | +| **Q4_0** | Low | Low | ARM or low-memory devices | llama.cpp can optimize for ARM devices | + +--- + +## **Included Files & Details** + +### `HyperCLOVAX-SEED-Text-Instruct-0.5B-bf16.gguf` +- Model weights preserved in **BF16**. +- Use this if you want to **requantize** the model into a different format. +- Best if your device supports **BF16 acceleration**. + +### `HyperCLOVAX-SEED-Text-Instruct-0.5B-f16.gguf` +- Model weights stored in **F16**. +- Use if your device supports **FP16**, especially if BF16 is not available. 
+ +### `HyperCLOVAX-SEED-Text-Instruct-0.5B-bf16-q8_0.gguf` +- **Output & embeddings** remain in **BF16**. +- All other layers quantized to **Q8_0**. +- Use if your device supports **BF16** and you want a quantized version. + +### `HyperCLOVAX-SEED-Text-Instruct-0.5B-f16-q8_0.gguf` +- **Output & embeddings** remain in **F16**. +- All other layers quantized to **Q8_0**. + +### `HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_k.gguf` +- **Output & embeddings** quantized to **Q8_0**. +- All other layers quantized to **Q4_K**. +- Good for **CPU inference** with limited memory. + +### `HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_k_s.gguf` +- Smallest **Q4_K** variant, using less memory at the cost of accuracy. +- Best for **very low-memory setups**. + +### `HyperCLOVAX-SEED-Text-Instruct-0.5B-q6_k.gguf` +- **Output & embeddings** quantized to **Q8_0**. +- All other layers quantized to **Q6_K** . + +### `HyperCLOVAX-SEED-Text-Instruct-0.5B-q8_0.gguf` +- Fully **Q8** quantized model for better accuracy. +- Requires **more memory** but offers higher precision. + +### `HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_xs.gguf` +- **IQ3_XS** quantization, optimized for **extreme memory efficiency**. +- Best for **ultra-low-memory devices**. + +### `HyperCLOVAX-SEED-Text-Instruct-0.5B-iq3_m.gguf` +- **IQ3_M** quantization, offering a **medium block size** for better accuracy. +- Suitable for **low-memory devices**. + +### `HyperCLOVAX-SEED-Text-Instruct-0.5B-q4_0.gguf` +- Pure **Q4_0** quantization, optimized for **ARM devices**. +- Best for **low-memory environments**. +- Prefer IQ4_NL for better accuracy. 
+ +# ๐Ÿš€ If you find these models useful +โค **Please click "Like" if you find this useful!** +Help me test my **AI-Powered Network Monitor Assistant** with **quantum-ready security checks**: +๐Ÿ‘‰ [Quantum Network Monitor](https://readyforquantum.com/dashboard/?assistant=open&utm_source=huggingface&utm_medium=referral&utm_campaign=huggingface_repo_readme) + +๐Ÿ’ฌ **How to test**: + Choose an **AI assistant type**: + - `TurboLLM` (GPT-4o-mini) + - `HugLLM` (Hugging Face Open-source) + - `TestLLM` (Experimental CPU-only) + +### **What Iโ€™m Testing** +Iโ€™m pushing the limits of **small open-source models for AI network monitoring**, specifically: +- **Function calling** against live network services +- **How small can a model go** while still handling: + - Automated **Nmap scans** + - **Quantum-readiness checks** + - **Network Monitoring tasks** + +๐ŸŸก **TestLLM** โ€“ Current experimental model (llama.cpp on 2 CPU threads): +- โœ… **Zero-configuration setup** +- โณ 30s load time (slow inference but **no API costs**) +- ๐Ÿ”ง **Help wanted!** If youโ€™re into **edge-device AI**, letโ€™s collaborate! + +### **Other Assistants** +๐ŸŸข **TurboLLM** โ€“ Uses **gpt-4o-mini** for: +- **Create custom cmd processors to run .net code on Quantum Network Monitor Agents** +- **Real-time network diagnostics and monitoring** +- **Security Audits** +- **Penetration testing** (Nmap/Metasploit) + + +๐Ÿ”ต **HugLLM** โ€“ Latest Open-source models: +- ๐ŸŒ Runs on Hugging Face Inference API + +### ๐Ÿ’ก **Example commands you could test**: +1. `"Give me info on my website's SSL certificate"` +2. `"Check if my server is using quantum safe encryption for communication"` +3. `"Run a comprehensive security audit on my server"` +4. `"Create a cmd processor to .. (whatever you want)"` Note you need to install a Quantum Network Monitor Agent to run the .net code from. This is a very flexible and powerful feature. Use with caution!
+ +### Final Word + +I fund the servers used to create these model files, run the Quantum Network Monitor service, and pay for inference from Novita and OpenAIโ€”all out of my own pocket. All the code behind the model creation and the Quantum Network Monitor project is [open source](https://github.com/Mungert69). Feel free to use whatever you find helpful. + +If you appreciate the work, please consider [buying me a coffee](https://www.buymeacoffee.com/mahadeva) โ˜•. Your support helps cover service costs and allows me to raise token limits for everyone. + +I'm also open to job opportunities or sponsorship. + +Thank you! ๐Ÿ˜Š + + + + + +![image/png](https://cdn-uploads.huggingface.co/production/uploads/65265ab8f8db96cffcb969dc/szGAraJ_ZawG0kozH5yPi.png) + + +## Overview + +HyperCLOVAX-SEED-Text-Instruct-0.5B is a Text-to-Text model with instruction-following capabilities that excels in understanding Korean language and culture. Compared to external competitors of similar scale, it demonstrates improved mathematical performance and a substantial enhancement in Korean language capability. The HyperCLOVAX-SEED-Text-Instruct-0.5B is currently the smallest model released by the HyperCLOVAX, representing a lightweight solution suitable for deployment in resourceโ€‘constrained environments such as edge devices. It supports a maximum context length of 4K and functions as a versatile small model applicable to a wide range of tasks. The total cost of a single training run for HyperCLOVAX-SEED-Text-Instruct-0.5B was 4.358K A100 GPU hours (approximately USD 6.537K), which is 39 times lower than the cost of training the `QWEN2.5โ€‘0.5Bโ€‘instruct` model. 
+ + +## Basic Information + +- **Architecture**: Transformerโ€‘based (Dense Model) +- **Parameters**: 0.57ย B (total); 0.45ย B (excluding token embeddings, tied embeddings) +- **Input/Output Format**: Textย /ย Text +- **Maximum Context Length**: 4ย K tokens +- **Knowledge Cutoff Date**: Trained on data up to Januaryย 2025 + + +## Training and Data + +The training dataset for HyperCLOVAX-SEED-Text-Instruct-0.5B consists of diverse sources, including the highโ€‘quality data accumulated during the development of HyperCLOVAX-SEED-Text-Instruct-0.5B. Training was conducted in three main stages: +1. **Pretraining**: Knowledge acquisition using highโ€‘quality data and a highโ€‘performance pretrained model. +2. **Rejection Sampling Fineโ€‘Tuning (RFT)**: Enhancement of multiโ€‘domain knowledge and complex reasoning capabilities. +3. **Supervised Fineโ€‘Tuning (SFT)**: Improvement of instructionโ€‘following proficiency. + + +## Training Cost + +HyperCLOVAX-SEED-Text-Instruct-0.5B leveraged HyperCLOVA Xโ€™s lightweight training process and highโ€‘quality data to achieve significantly lower training costs compared to industryโ€‘leading competitors of similar scale. Excluding the SFT stage, a single pretraining run incurred: + +| Pretraining Cost Category | HyperCLOVAX-SEED-Text-Instruct-0.5B | QWEN2.5โ€‘0.5Bโ€‘instruct | +|---------------------------------|-----------------------------------------------|-------------------------------------| +| **A100 GPU Hours** | 4.358ย K | 169.257ย K | +| **Cost (USD)** | 6.537 K | 253.886ย K | + +This represents approximately a 39ร— reduction in pretraining cost relative to `QWEN2.5โ€‘0.5B-instruct`. 
+ +## Benchmarks + +| **Model** | **KMMLU (5-shot, acc)** | **HAE-RAE (5-shot, acc)** | **CLiCK (5-shot, acc)** | **KoBEST (5-shot, acc)** | +| --- | --- | --- | --- | --- | +| HyperCLOVAX-SEED-Text-Base-0.5B | 0.4181 | 0.6370 | 0.5373 | 0.6963 +| HyperCLOVAX-SEED-Text-Instruct-0.5B | 0.3815 | 0.5619 | 0.4446 | 0.6299 | +| QWEN2.5-0.5B-instruct | 0.2968 | 0.3428 | 0.3805 | 0.5025 | + +## HuggingFace Usage Example + +### Python Code +```python +from transformers import AutoModelForCausalLM, AutoTokenizer +model = AutoModelForCausalLM.from_pretrained("naver-hyperclovax/HyperCLOVAX-SEED-Text-Instruct-0.5B").to(device="cuda") +tokenizer = AutoTokenizer.from_pretrained("naver-hyperclovax/HyperCLOVAX-SEED-Text-Instruct-0.5B") + +chat = [ + {"role": "tool_list", "content": ""}, + {"role": "system", "content": "- AI ์–ธ์–ด๋ชจ๋ธ์˜ ์ด๋ฆ„์€ \"CLOVA X\" ์ด๋ฉฐ ๋„ค์ด๋ฒ„์—์„œ ๋งŒ๋“ค์—ˆ๋‹ค.\n- ์˜ค๋Š˜์€ 2025๋…„ 04์›” 24์ผ(๋ชฉ)์ด๋‹ค."}, + {"role": "user", "content": "์Šˆ๋ขฐ๋”ฉ๊ฑฐ ๋ฐฉ์ •์‹๊ณผ ์–‘์ž์—ญํ•™์˜ ๊ด€๊ณ„๋ฅผ ์ตœ๋Œ€ํ•œ ์ž์„ธํžˆ ์•Œ๋ ค์ค˜."}, +] + +inputs = tokenizer.apply_chat_template(chat, add_generation_prompt=True, return_dict=True, return_tensors="pt") +inputs = inputs.to(device="cuda") +output_ids = model.generate(**inputs, max_length=1024, stop_strings=["<|endofturn|>", "<|stop|>"], repetition_penalty=1.2, tokenizer=tokenizer) +print(tokenizer.batch_decode(output_ids)) +``` + +### Result +```bash +['<|im_start|>tool_list\n<|im_end|>\n<|im_start|>system\n- AI ์–ธ์–ด๋ชจ๋ธ์˜ ์ด๋ฆ„์€ "CLOVA X" ์ด๋ฉฐ ๋„ค์ด๋ฒ„์—์„œ ๋งŒ๋“ค์—ˆ๋‹ค.\n- ์˜ค๋Š˜์€ 2025๋…„ 04์›” 24์ผ(๋ชฉ)์ด๋‹ค.<|im_end|>\n<|im_start|>user\n์Šˆ๋ขฐ๋”ฉ๊ฑฐ ๋ฐฉ์ •์‹๊ณผ ์–‘์ž์—ญํ•™์˜ ๊ด€๊ณ„๋ฅผ ์ตœ๋Œ€ํ•œ ์ž์„ธํžˆ ์•Œ๋ ค์ค˜.<|im_end|>\n<|im_start|>assistant\n์–‘์ž์—ญํ•™์€ ์Šˆ๋ขฐ๋”ฉ๊ฑฐ ๋ฐฉ์ •์‹์„ ํ†ตํ•ด ๋ฌผ์งˆ๊ณผ ์—๋„ˆ์ง€, ๊ณต๊ฐ„ ๋“ฑ์˜ ํ˜„์ƒ์„ ์„ค๋ช…ํ•ฉ๋‹ˆ๋‹ค.\n\n**1. 
์Šˆ๋ขฐ๋”ฉ๊ฑฐ ๋ฐฉ์ •์‹**\n\n์Šˆ๋ขฐ๋”ฉ๊ฑฐ๋Š” ํŒŒ๋™ํ•จ์ˆ˜๋ฅผ ์ด์šฉํ•˜์—ฌ ์ž…์ž์˜ ์œ„์น˜์™€ ์šด๋™๋Ÿ‰์„ ๊ณ„์‚ฐํ•  ์ˆ˜ ์žˆ๋‹ค๊ณ  ์ฃผ์žฅํ–ˆ์Šต๋‹ˆ๋‹ค. ์ด๋ฅผ ์œ„ํ•ด ๋‹ค์Œ๊ณผ ๊ฐ™์€ ์‹์œผ๋กœ ํ‘œํ˜„๋ฉ๋‹ˆ๋‹ค:\n\n$$\\frac{\\partial \\psi}{\\partial t} = iH \\nabla^2 \\psi + V(x)\\psi $$\n\n์—ฌ๊ธฐ์„œ $\\psi$๋Š” ํŒŒ๋™ํ•จ์ˆ˜์ด๊ณ  $i$๋Š” ํ—ˆ์ˆ˜ ๋‹จ์œ„์ž…๋‹ˆ๋‹ค. ์—ฌ๊ธฐ์„œ $t$๋Š” ์‹œ๊ฐ„, $x$๋Š” ๊ณต๊ฐ„ ์ขŒํ‘œ์ด๋ฉฐ, $H$๋Š” ํ•ด๋ฐ€ํ„ด ์ƒ์ˆ˜๋กœ ์‹œ์Šคํ…œ์˜ ์—๋„ˆ์ง€๋ฅผ ๋‚˜ํƒ€๋ƒ…๋‹ˆ๋‹ค. ๋˜ํ•œ $V(x)$๋Š” ์™ธ๋ถ€ ํž˜์ด๋‚˜ ์žฅ๋ฒฝ์— ์˜ํ•ด ์˜ํ–ฅ์„ ๋ฐ›๋Š” ๋ถ€๋ถ„์„ ๋‚˜ํƒ€๋‚ด๋Š” ํ•จ์ˆ˜๋กœ, ์ผ๋ฐ˜์ ์œผ๋กœ ์ „์œ„์žฅ์„ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค.\n\n**2. ์–‘์ž์—ญํ•™๊ณผ ์Šˆ๋ขฐ๋”ฉ๊ฑฐ ๋ฐฉ์ •์‹์˜ ๊ด€๊ณ„**\n\n์–‘์ž์—ญํ•™์—์„œ๋Š” ์Šˆ๋ขฐ๋”ฉ๊ฑฐ ๋ฐฉ์ •์‹์ด ๋งค์šฐ ์ค‘์š”ํ•œ ์—ญํ• ์„ ํ•ฉ๋‹ˆ๋‹ค. ์ด๋Š” ๋ชจ๋“  ๋ฌผ๋ฆฌ์  ์‹œ์Šคํ…œ์ด ๋ถˆํ™•์ •์„ฑ ์›๋ฆฌ์— ๋”ฐ๋ผ ํ–‰๋™์„ ํ•˜๋ฉฐ, ์ด๋Ÿฌํ•œ ์‹œ์Šคํ…œ๋“ค์€ ํ™•๋ฅ ์ ์œผ๋กœ ์ƒํƒœ๋ฅผ ๊ฐ€์งˆ ์ˆ˜๋ฐ–์— ์—†๊ธฐ ๋•Œ๋ฌธ์ž…๋‹ˆ๋‹ค. ๋”ฐ๋ผ์„œ ์Šˆ๋ขฐ๋”ฉ๊ฑฐ ๋ฐฉ์ •์‹์€ ์–‘์ž์—ญํ•™์„ ์ˆ˜ํ•™์ ์œผ๋กœ ๋ชจ๋ธ๋งํ•˜๋Š” ํ•ต์‹ฌ์ ์ธ ๋„๊ตฌ ์ค‘ ํ•˜๋‚˜์ž…๋‹ˆ๋‹ค.\n\n์˜ˆ๋ฅผ ๋“ค์–ด, ์›์žํ•ต ๋‚ด์˜ ์ „์ž๋“ค์˜ ์ƒํƒœ๋Š” ์Šˆ๋ขฐ๋”ฉ๊ฑฐ ๋ฐฉ์ •์‹์— ์˜ํ•ด ๊ฒฐ์ •๋˜๋ฉฐ, ์ด๋Š” ๋ฌผ๋ฆฌํ•™์  ๋ฒ•์น™์„ ๋”ฐ๋ฅด๋Š” ๊ฒƒ์œผ๋กœ ๋ณด์ž…๋‹ˆ๋‹ค. ๋˜ํ•œ, ๊ด‘์ „ ํšจ๊ณผ์—์„œ๋„ ์Šˆ๋ขฐ๋”ฉ๊ฑฐ ๋ฐฉ์ •์‹์€ ๋น›์ด ๋ฌผ์งˆ ๋‚ด์—์„œ ์–ด๋–ป๊ฒŒ ํก์ˆ˜๋˜๊ณ  ๋ฐ˜์‚ฌ๋˜๋Š”์ง€๋ฅผ ์˜ˆ์ธกํ•˜๋Š”๋ฐ ์‚ฌ์šฉ๋ฉ๋‹ˆ๋‹ค.\n\n**3. ์‘์šฉ ๋ถ„์•ผ**\n\n์Šˆ๋ขฐ๋”ฉ๊ฑฐ ๋ฐฉ์ •์‹์€ ๋‹ค์–‘ํ•œ ๋ถ„์•ผ์—์„œ ํ™œ์šฉ๋˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. 
์˜ˆ๋ฅผ ๋“ค๋ฉด, ๋ฐ˜๋„์ฒด ๊ธฐ์ˆ ์—์„œ์˜ ํŠธ๋žœ์ง€์Šคํ„ฐ ์„ค๊ณ„, ํ•ต๋ฌผ๋ฆฌํ•™์—์„œ์˜ ๋ฐฉ์‚ฌ์„ฑ ๋ถ•๊ดด ์—ฐ๊ตฌ ๋“ฑ์ด ์žˆ์œผ๋ฉฐ, ์ด๋Š” ๋ชจ๋‘ ์Šˆ๋ขฐ๋”ฉ๊ฑฐ ๋ฐฉ์ •์‹์„ ๊ธฐ๋ฐ˜์œผ๋กœ ํ•œ ์ด๋ก ์  ๊ธฐ๋ฐ˜ ์œ„์—์„œ ์ด๋ฃจ์–ด์ง‘๋‹ˆ๋‹ค.\n\n๋˜ํ•œ, ํ˜„๋Œ€ ๊ณผํ•™ ๊ธฐ์ˆ ์˜ ๋ฐœ์ „์—๋„ ํฐ ๊ธฐ์—ฌ๋ฅผ ํ•˜๊ณ  ์žˆ๋Š”๋ฐ, ํŠนํžˆ ์ธ๊ณต์ง€๋Šฅ(AI), ์ปดํ“จํ„ฐ ์‹œ๋ฎฌ๋ ˆ์ด์…˜ ๋“ฑ์—์„œ ๋ณต์žกํ•œ ๋ฌธ์ œ๋ฅผ ํ•ด๊ฒฐํ•˜๊ณ  ์ƒˆ๋กœ์šด ์ง€์‹์„ ์ฐฝ์ถœํ•˜๊ธฐ ์œ„ํ•œ ๊ธฐ์ดˆ๊ฐ€ ๋˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค.\n\n๊ฒฐ๋ก ์ ์œผ๋กœ, ์Šˆ๋ขฐ๋”ฉ๊ฑฐ ๋ฐฉ์ •์‹์€ ์–‘์ž์—ญํ•™์˜ ๊ธฐ๋ณธ ๊ฐœ๋…๋“ค์„ ์ดํ•ดํ•˜๊ณ  ํ•ด์„ํ•˜๋ฉฐ, ๊ทธ ๊ฒฐ๊ณผ๋กœ์„œ ๋งŽ์€ ํ˜์‹ ์ ์ด๊ณ  ์‹ค์šฉ์ ์ธ ๊ธฐ์ˆ ์„ ๊ฐ€๋Šฅํ•˜๊ฒŒ ํ–ˆ์Šต๋‹ˆ๋‹ค. ์ด๋Š” ์–‘์ž์—ญํ•™์˜ ์ค‘์š”์„ฑ์„ ๋ณด์—ฌ์ฃผ๋Š” ๋Œ€ํ‘œ์ ์ธ ์˜ˆ์‹œ๋ผ๊ณ  ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.<|im_end|><|endofturn|>'] +``` \ No newline at end of file