commit 3f49da36ebd350288c1adde8da59be442ac8d457
Author: ModelHub XC
Date: Mon May 4 01:33:56 2026 +0800

    Initialize project; model provided by the ModelHub XC community
    Model: kenonix/Llama-3.3-8B-Thinking-Gemini-Flash-11000x-128k-Q8_0-GGUF
    Source: Original Platform

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..873c1bf
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,36 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+llama-3.3-8b-thinking-gemini-flash-11000x-128k-q8_0.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..7f8961c
--- /dev/null
+++ b/README.md
@@ -0,0 +1,95 @@
+---
+license: apache-2.0
+datasets:
+- TeichAI/gemini-2.5-flash-11000x
+base_model: DavidAU/Llama-3.3-8B-Thinking-Gemini-Flash-11000x-128k
+language:
+- en
+- fr
+- de
+- es
+- it
+- pt
+- zh
+- ja
+- ru
+- ko
+tags:
+- thinking
+- reasoning
+- Gemini Flash
+- creative
+- creative writing
+- fiction writing
+- plot generation
+- sub-plot generation
+- story generation
+- scene continue
+- storytelling
+- fiction story
+- science fiction
+- romance
+- all genres
+- story
+- writing
+- vivid prosing
+- vivid writing
+- fiction
+- roleplaying
+- bfloat16
+- role play
+- 128k context
+- llama3.3
+- llama-3
+- llama-3.3
+- unsloth
+- finetune
+- llama-cpp
+- gguf-my-repo
+pipeline_tag: text-generation
+library_name: transformers
+---
+
+# kenonix/Llama-3.3-8B-Thinking-Gemini-Flash-11000x-128k-Q8_0-GGUF
+This model was converted to GGUF format from [`DavidAU/Llama-3.3-8B-Thinking-Gemini-Flash-11000x-128k`](https://huggingface.co/DavidAU/Llama-3.3-8B-Thinking-Gemini-Flash-11000x-128k) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
+Refer to the [original model card](https://huggingface.co/DavidAU/Llama-3.3-8B-Thinking-Gemini-Flash-11000x-128k) for more details on the model.
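+
+If you would rather fetch the quantized file yourself instead of letting llama.cpp download it on first run, here is a minimal sketch using `huggingface-cli` (assumptions: the CLI is installed, e.g. via `pip install -U "huggingface_hub[cli]"`, and `./models` is just an illustrative target directory):
+
+```bash
+# Download only the ~8.5 GB Q8_0 GGUF file from this repo;
+# ./models is a hypothetical local directory.
+huggingface-cli download kenonix/Llama-3.3-8B-Thinking-Gemini-Flash-11000x-128k-Q8_0-GGUF \
+  llama-3.3-8b-thinking-gemini-flash-11000x-128k-q8_0.gguf \
+  --local-dir ./models
+```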
+
+## Use with llama.cpp
+Install llama.cpp via Homebrew (works on macOS and Linux):
+
+```bash
+brew install llama.cpp
+```
+
+Invoke the llama.cpp server or the CLI.
+
+### CLI:
+```bash
+llama-cli --hf-repo kenonix/Llama-3.3-8B-Thinking-Gemini-Flash-11000x-128k-Q8_0-GGUF --hf-file llama-3.3-8b-thinking-gemini-flash-11000x-128k-q8_0.gguf -p "The meaning to life and the universe is"
+```
+
+### Server:
+```bash
+llama-server --hf-repo kenonix/Llama-3.3-8B-Thinking-Gemini-Flash-11000x-128k-Q8_0-GGUF --hf-file llama-3.3-8b-thinking-gemini-flash-11000x-128k-q8_0.gguf -c 2048
+```
+
+Once the server is up, it exposes an HTTP API; a sample `curl` request is sketched at the end of this document.
+
+Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the llama.cpp repo.
+
+Step 1: Clone llama.cpp from GitHub.
+```bash
+git clone https://github.com/ggerganov/llama.cpp
+```
+
+Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag, along with any other hardware-specific flags (e.g., `LLAMA_CUDA=1` for NVIDIA GPUs on Linux).
+```bash
+cd llama.cpp && LLAMA_CURL=1 make
+```
+
+Step 3: Run inference through the main binary.
+```bash
+./llama-cli --hf-repo kenonix/Llama-3.3-8B-Thinking-Gemini-Flash-11000x-128k-Q8_0-GGUF --hf-file llama-3.3-8b-thinking-gemini-flash-11000x-128k-q8_0.gguf -p "The meaning to life and the universe is"
+```
+or
+```bash
+./llama-server --hf-repo kenonix/Llama-3.3-8B-Thinking-Gemini-Flash-11000x-128k-Q8_0-GGUF --hf-file llama-3.3-8b-thinking-gemini-flash-11000x-128k-q8_0.gguf -c 2048
+```
diff --git a/llama-3.3-8b-thinking-gemini-flash-11000x-128k-q8_0.gguf b/llama-3.3-8b-thinking-gemini-flash-11000x-128k-q8_0.gguf
new file mode 100644
index 0000000..7ff04ef
--- /dev/null
+++ b/llama-3.3-8b-thinking-gemini-flash-11000x-128k-q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3937251134c047ed7940cfec228ed0b35acaa2ae94092304d6325006b391be7f
+size 8540771968
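
As referenced in the README above, here is a hedged sketch of querying the running server. It assumes `llama-server` was started as in the Server example and is listening on its default port (8080); the endpoint is llama.cpp's OpenAI-compatible chat API, and the prompt and sampling parameters are illustrative only.

```bash
# Assumes llama-server is running locally on the default port 8080.
# Prompt, temperature, and max_tokens below are illustrative values.
curl http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "messages": [
      {"role": "user", "content": "Outline a three-act plot for a short mystery story."}
    ],
    "temperature": 0.7,
    "max_tokens": 512
  }'
```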