From 7b3fb4d66ef494f026626dc88e332df2eba754b6 Mon Sep 17 00:00:00 2001 From: ModelHub XC Date: Sat, 11 Apr 2026 11:34:55 +0800 Subject: [PATCH] =?UTF-8?q?=E5=88=9D=E5=A7=8B=E5=8C=96=E9=A1=B9=E7=9B=AE?= =?UTF-8?q?=EF=BC=8C=E7=94=B1ModelHub=20XC=E7=A4=BE=E5=8C=BA=E6=8F=90?= =?UTF-8?q?=E4=BE=9B=E6=A8=A1=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Model: RedHatAI/Qwen2.5-0.5B-Instruct-quantized.w8a8 Source: Original Platform --- .gitattributes | 36 ++++++ README.md | 242 ++++++++++++++++++++++++++++++++++++++++ added_tokens.json | 24 ++++ config.json | 3 + configuration.json | 1 + generation_config.json | 14 +++ merges.txt | 3 + model.safetensors | 3 + recipe.yaml | 9 ++ special_tokens_map.json | 31 +++++ tokenizer.json | 3 + tokenizer_config.json | 3 + vocab.json | 3 + 13 files changed, 375 insertions(+) create mode 100644 .gitattributes create mode 100644 README.md create mode 100644 added_tokens.json create mode 100644 config.json create mode 100644 configuration.json create mode 100644 generation_config.json create mode 100644 merges.txt create mode 100644 model.safetensors create mode 100644 recipe.yaml create mode 100644 special_tokens_map.json create mode 100644 tokenizer.json create mode 100644 tokenizer_config.json create mode 100644 vocab.json diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..52373fe --- /dev/null +++ b/.gitattributes @@ -0,0 +1,36 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text 
+*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +tokenizer.json filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md new file mode 100644 index 0000000..5f46919 --- /dev/null +++ b/README.md @@ -0,0 +1,242 @@ +--- +license: apache-2.0 +license_link: https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct/blob/main/LICENSE +language: +- en +pipeline_tag: text-generation +base_model: Qwen/Qwen2.5-0.5B-Instruct +tags: +- chat +- neuralmagic +- llmcompressor +--- + +# Qwen2.5-0.5B-Instruct-quantized.w8a8 + +## Model Overview +- **Model Architecture:** Qwen2 + - **Input:** Text + - **Output:** Text +- **Model Optimizations:** + - **Activation quantization:** INT8 + - **Weight quantization:** INT8 +- **Intended Use Cases:** Intended for commercial and research use in multiple languages. Similarly to [Qwen2.5-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct), this model is intended for assistant-like chat. 
+- **Out-of-scope:** Use in any manner that violates applicable laws or regulations (including trade compliance laws). +- **Release Date:** 10/09/2024 +- **Version:** 1.0 +- **License(s):** [apache-2.0](https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct/blob/main/LICENSE) +- **Model Developers:** Neural Magic + +Quantized version of [Qwen2.5-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct). +It achieves an average score of 43.38 on the [OpenLLM](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) benchmark version 1 and 23.42 on version 2, whereas the unquantized model achieves 43.64 on version 1 and 23.39 on version 2. + +### Model Optimizations + +This model was obtained by quantizing the weights of [Qwen2.5-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct) to INT8 data type. +This optimization reduces the number of bits used to represent weights and activations from 16 to 8, reducing GPU memory requirements (by approximately 50%) and increasing matrix-multiply compute throughput (by approximately 2x). +Weight quantization also reduces disk size requirements by approximately 50%. + +Only weights and activations of the linear operators within transformer blocks are quantized. +Weights are quantized with a symmetric static per-channel scheme, where a fixed linear scaling factor is applied between INT8 and floating point representations for each output channel dimension. +Activations are quantized with a symmetric dynamic per-token scheme, computing a linear scaling factor at runtime for each token between INT8 and floating point representations. + +## Deployment + +This model can be deployed efficiently using the [vLLM](https://docs.vllm.ai/en/latest/) backend, as shown in the example below. 
+ +```python +from vllm import LLM, SamplingParams +from transformers import AutoTokenizer + +model_id = "neuralmagic/Qwen2.5-0.5B-Instruct-quantized.w8a8" +number_gpus = 1 +max_model_len = 8192 + +sampling_params = SamplingParams(temperature=0.7, top_p=0.8, max_tokens=256) + +tokenizer = AutoTokenizer.from_pretrained(model_id) + +prompt = "Give me a short introduction to large language model." + +llm = LLM(model=model_id, tensor_parallel_size=number_gpus, max_model_len=max_model_len) + +outputs = llm.generate(prompt, sampling_params) + +generated_text = outputs[0].outputs[0].text +print(generated_text) +``` + +vLLM aslo supports OpenAI-compatible serving. See the [documentation](https://docs.vllm.ai/en/latest/) for more details. + + +## Evaluation + +The model was evaluated on the [OpenLLM](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) leaderboard tasks (version 1) with the [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/tree/383bbd54bc621086e05aa1b030d8d4d5635b25e6) (commit 383bbd54bc621086e05aa1b030d8d4d5635b25e6) and the [vLLM](https://docs.vllm.ai/en/stable/) engine, using the following command: +``` +lm_eval \ + --model vllm \ + --model_args pretrained="neuralmagic/Qwen2.5-0.5B-Instruct-quantized.w8a8",dtype=auto,gpu_memory_utilization=0.9,add_bos_token=True,max_model_len=4096,enable_chunk_prefill=True,tensor_parallel_size=1 \ + --tasks openllm \ + --batch_size auto +``` + +### Accuracy + +#### Open LLM Leaderboard evaluation scores + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Benchmark + Qwen2.5-0.5B-Instruct + Qwen2.5-0.5B-Instruct-quantized.w8a8 (this model) + Recovery +
OpenLLM v1 + MMLU (5-shot) + 46.83 + 46.29 + 98.9% +
ARC Challenge (25-shot) + 33.62 + 33.36 + 99.2% +
GSM-8K (5-shot, strict-match) + 33.21 + 33.21 + 100.0% +
Hellaswag (10-shot) + 51.31 + 50.97 + 99.3% +
Winogrande (5-shot) + 55.01 + 55.01 + 100.0% +
TruthfulQA (0-shot, mc2) + 41.85 + 41.47 + 99.1% +
Average + 43.64 + 43.38 + 99.4% +
OpenLLM v2 + MMLU-Pro (5-shot) + 17.49 + 16.95 + 96.9% +
IFEval (0-shot) + 31.17 + 32.04 + 102.8% +
BBH (3-shot) + 32.79 + 32.51 + 99.2% +
Math-lvl-5 (4-shot) + 0.21 + 0.17 + *** +
GPQA (0-shot) + 25.67 + 26.12 + 101.8% +
MuSR (0-shot) + 33.02 + 32.75 + 99.2% +
Average + 23.39 + 23.42 + 100.1% +
+*** Reference value too low to report meaningful recovery. diff --git a/added_tokens.json b/added_tokens.json new file mode 100644 index 0000000..482ced4 --- /dev/null +++ b/added_tokens.json @@ -0,0 +1,24 @@ +{ + "": 151658, + "": 151657, + "<|box_end|>": 151649, + "<|box_start|>": 151648, + "<|endoftext|>": 151643, + "<|file_sep|>": 151664, + "<|fim_middle|>": 151660, + "<|fim_pad|>": 151662, + "<|fim_prefix|>": 151659, + "<|fim_suffix|>": 151661, + "<|im_end|>": 151645, + "<|im_start|>": 151644, + "<|image_pad|>": 151655, + "<|object_ref_end|>": 151647, + "<|object_ref_start|>": 151646, + "<|quad_end|>": 151651, + "<|quad_start|>": 151650, + "<|repo_name|>": 151663, + "<|video_pad|>": 151656, + "<|vision_end|>": 151653, + "<|vision_pad|>": 151654, + "<|vision_start|>": 151652 +} diff --git a/config.json b/config.json new file mode 100644 index 0000000..c3e2097 --- /dev/null +++ b/config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc9d7ff9d7e7ed943291edd199303cd8943755acc8e7b8cf05d6fda15f420598 +size 1920 diff --git a/configuration.json b/configuration.json new file mode 100644 index 0000000..bbeeda1 --- /dev/null +++ b/configuration.json @@ -0,0 +1 @@ +{"framework": "pytorch", "task": "text-generation", "allow_remote": true} \ No newline at end of file diff --git a/generation_config.json b/generation_config.json new file mode 100644 index 0000000..5219873 --- /dev/null +++ b/generation_config.json @@ -0,0 +1,14 @@ +{ + "bos_token_id": 151643, + "do_sample": true, + "eos_token_id": [ + 151645, + 151643 + ], + "pad_token_id": 151643, + "repetition_penalty": 1.1, + "temperature": 0.7, + "top_k": 20, + "top_p": 0.8, + "transformers_version": "4.45.1" +} diff --git a/merges.txt b/merges.txt new file mode 100644 index 0000000..80c1a19 --- /dev/null +++ b/merges.txt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8831e4f1a044471340f7c0a83d7bd71306a5b867e95fd870f74d0c5308a904d5 +size 1671853 diff --git 
a/model.safetensors b/model.safetensors new file mode 100644 index 0000000..e4e137a --- /dev/null +++ b/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e8edffe394b0fef5422540357eedaa30e4b072060a728ce2a344402da8d24e2 +size 903168128 diff --git a/recipe.yaml b/recipe.yaml new file mode 100644 index 0000000..76496f6 --- /dev/null +++ b/recipe.yaml @@ -0,0 +1,9 @@ +quant_stage: + quant_modifiers: + GPTQModifier: + sequential_update: true + dampening_frac: 0.01 + ignore: [lm_head] + scheme: W8A8 + targets: Linear + observer: mse diff --git a/special_tokens_map.json b/special_tokens_map.json new file mode 100644 index 0000000..ac23c0a --- /dev/null +++ b/special_tokens_map.json @@ -0,0 +1,31 @@ +{ + "additional_special_tokens": [ + "<|im_start|>", + "<|im_end|>", + "<|object_ref_start|>", + "<|object_ref_end|>", + "<|box_start|>", + "<|box_end|>", + "<|quad_start|>", + "<|quad_end|>", + "<|vision_start|>", + "<|vision_end|>", + "<|vision_pad|>", + "<|image_pad|>", + "<|video_pad|>" + ], + "eos_token": { + "content": "<|im_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": { + "content": "<|endoftext|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } +} diff --git a/tokenizer.json b/tokenizer.json new file mode 100644 index 0000000..33d22a4 --- /dev/null +++ b/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb73a25aba3c83c6c815a03a334b0440bd549f9a54fa3673e005f5532f6b32fe +size 11421995 diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000..a12c302 --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e88129d9769a0b14b1587a7d5e829fe93ac0e1511636471fdfc0811951418e6 +size 7306 diff --git a/vocab.json b/vocab.json new file mode 100644 index 0000000..6c49fc6 --- /dev/null +++ b/vocab.json @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca10d7e9fb3ed18575dd1e277a2579c16d108e32f27439684afa0e10b1440910 +size 2776833