commit 609378d63d5d6417322eb865e2f56d637bc3e6bb Author: ModelHub XC Date: Tue May 5 13:29:37 2026 +0800 初始化项目,由ModelHub XC社区提供模型 Model: FlorianJc/Vigostral-7b-Chat-GGUF Source: Original Platform diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..9777955 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,40 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +vigostral-7b-chat-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text +vigostral-7b-chat-Q5_K_M.gguf filter=lfs 
diff=lfs merge=lfs -text +vigostral-7b-chat-COPY.gguf filter=lfs diff=lfs merge=lfs -text +vigostral-7b-chat-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text +vigostral-7b-chat-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md new file mode 100644 index 0000000..3a9b4d1 --- /dev/null +++ b/README.md @@ -0,0 +1,34 @@ +--- +license: apache-2.0 +language: +- fr +pipeline_tag: text-generation +tags: +- mistral +- finetuned +- french +- vigogne +- text-generation-inference +--- + +# Vigostral-7b-Chat GGUF +Conversion du modèle vigostral-7b-chat au format GGUF + +Lien du modèle original: +https://huggingface.co/bofenghuang/vigostral-7b-chat/ + +Le projet llama.cpp (pour l'inférence): +https://github.com/ggerganov/llama.cpp/ + +Les modèles ont dans leur nom un suffixe qui définit la quantification. + +La perte de qualité est tirée de la documentation de llama.cpp et a été calculée par la variation de la perplexité (ppl) sur le modèle LLaMA-v1-7B. +Elle n'est donc fournie ici que pour donner une approximation de la perte réelle. 
+ +| Méthode de quantification | Taille du fichier | Perte | Téléchargement | +| ---- | ---- | ---- | ---- | +| COPY | 13,5 Go | Aucune | https://huggingface.co/FlorianJc/Vigostral-7b-Chat-GGUF/blob/main/vigostral-7b-chat-COPY.gguf | +| Q8_0 | 7,2 Go | +0.0004 ppl @ LLaMA-v1-7B | https://huggingface.co/FlorianJc/Vigostral-7b-Chat-GGUF/blob/main/vigostral-7b-chat-Q8_0.gguf | +| Q6_K | 5,5 Go | -0.0008 ppl @ LLaMA-v1-7B | https://huggingface.co/FlorianJc/Vigostral-7b-Chat-GGUF/blob/main/vigostral-7b-chat-Q6_K.gguf | +| Q5_K_M | 4,8 Go | +0.0122 ppl @ LLaMA-v1-7B | https://huggingface.co/FlorianJc/Vigostral-7b-Chat-GGUF/blob/main/vigostral-7b-chat-Q5_K_M.gguf | +| Q4_K_M | 4,1 Go | +0.0532 ppl @ LLaMA-v1-7B | https://huggingface.co/FlorianJc/Vigostral-7b-Chat-GGUF/blob/main/vigostral-7b-chat-Q4_K_M.gguf | \ No newline at end of file diff --git a/vigostral-7b-chat-COPY.gguf b/vigostral-7b-chat-COPY.gguf new file mode 100644 index 0000000..8f47ec8 --- /dev/null +++ b/vigostral-7b-chat-COPY.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e5e2713485c74438a6ec191b794d7243444f40e83274a1d0cdd17fc2045392c +size 14484731424 diff --git a/vigostral-7b-chat-Q4_K_M.gguf b/vigostral-7b-chat-Q4_K_M.gguf new file mode 100644 index 0000000..d3dad88 --- /dev/null +++ b/vigostral-7b-chat-Q4_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66c61d2347db4c22ea120a294161d02865a1b8fe9935bb976d75ef20e8e137d7 +size 4368438848 diff --git a/vigostral-7b-chat-Q5_K_M.gguf b/vigostral-7b-chat-Q5_K_M.gguf new file mode 100644 index 0000000..4f2eb76 --- /dev/null +++ b/vigostral-7b-chat-Q5_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08984b79085e06620fee0ccf82798cc5ebf2101e99d216dc1df008c5004d33c7 +size 5131408960 diff --git a/vigostral-7b-chat-Q6_K.gguf b/vigostral-7b-chat-Q6_K.gguf new file mode 100644 index 0000000..7801b74 --- /dev/null +++ b/vigostral-7b-chat-Q6_K.gguf @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:a94f2c166045fa8a039df0b501c9d23042f5ed064a43edd219983b29105f521c +size 5942064704 diff --git a/vigostral-7b-chat-Q8_0.gguf b/vigostral-7b-chat-Q8_0.gguf new file mode 100644 index 0000000..731ddcc --- /dev/null +++ b/vigostral-7b-chat-Q8_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76cae196739b9a9c5d28abca831ec0f662aecde59b5f3968fbfb8a485c9cf191 +size 7695857216