commit fe294906a58d8711832f8e4f18d56ce61d6d0d56
Author: ModelHub XC
Date:   Wed May 6 12:14:02 2026 +0800

    Initialize project; model provided by the ModelHub XC community

    Model: RedHatAI/Llama-3.2-1B-Instruct-quantized.w8a8
    Source: Original Platform

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..a6344aa
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..a2ec256
--- /dev/null
+++ b/README.md
@@ -0,0 +1,326 @@
+---
+license: llama3.2
+language:
+- en
+- de
+- fr
+- it
+- pt
+- hi
+- es
+- th
+pipeline_tag: text-generation
+tags:
+- llama
+- llama-3
+- neuralmagic
+- llmcompressor
+base_model: meta-llama/Llama-3.2-1B-Instruct
+---
+
+# Llama-3.2-1B-Instruct-quantized.w8a8
+
+## Model Overview
+- **Model Architecture:** Llama-3
+  - **Input:** Text
+  - **Output:** Text
+- **Model Optimizations:**
+  - **Activation quantization:** INT8
+  - **Weight quantization:** INT8
+- **Intended Use Cases:** Intended for commercial and research use in multiple languages. Similarly to [Llama-3.2-1B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct), this model is intended for assistant-like chat.
+- **Out-of-scope:** Use in any manner that violates applicable laws or regulations (including trade compliance laws).
+- **Release Date:** 9/25/2024
+- **Version:** 1.0
+- **License(s):** Llama3.2
+- **Model Developers:** Neural Magic
+
+Quantized version of [Llama-3.2-1B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct).
+It achieves scores within 5% of those of the unquantized model for MMLU, ARC-Challenge, GSM-8K, Hellaswag, Winogrande and TruthfulQA.
+
+### Model Optimizations
+
+This model was obtained by quantizing the weights of [Llama-3.2-1B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct) to the INT8 data type.
+This optimization reduces the number of bits used to represent weights and activations from 16 to 8, reducing GPU memory requirements (by approximately 50%) and increasing matrix-multiply compute throughput (by approximately 2x).
+Weight quantization also reduces disk size requirements by approximately 50%.
+
+Only weights and activations of the linear operators within transformer blocks are quantized.
+Weights are quantized with a symmetric static per-channel scheme, where a fixed linear scaling factor is applied between INT8 and floating point representations for each output channel dimension.
+Activations are quantized with a symmetric dynamic per-token scheme, computing a linear scaling factor at runtime for each token between INT8 and floating point representations.
+The [SmoothQuant](https://arxiv.org/abs/2211.10438) algorithm is used to alleviate outliers in the activations, whereas the [GPTQ](https://arxiv.org/abs/2210.17323) algorithm is applied for quantization.
+Both algorithms are implemented in the [llm-compressor](https://github.com/vllm-project/llm-compressor) library.
+GPTQ used a 1% damping factor and 512 sequences taken from Neural Magic's [LLM compression calibration dataset](https://huggingface.co/datasets/neuralmagic/LLM_compression_calibration).
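+
+In spirit, the weight and activation schemes described above reduce to the following sketch (toy tensors only; the helper names are illustrative and not part of llm-compressor):
+
+```python
+import torch
+
+def quantize_weight_per_channel(w: torch.Tensor):
+    # Symmetric static per-channel: one scale per output channel (row),
+    # fixed once from the weight values themselves.
+    scale = w.abs().amax(dim=1, keepdim=True).clamp(min=1e-8) / 127.0
+    q = torch.clamp(torch.round(w / scale), -127, 127).to(torch.int8)
+    return q, scale
+
+def quantize_activation_per_token(x: torch.Tensor):
+    # Symmetric dynamic per-token: one scale per token (row), computed
+    # at runtime from that token's activations.
+    scale = x.abs().amax(dim=-1, keepdim=True).clamp(min=1e-8) / 127.0
+    q = torch.clamp(torch.round(x / scale), -127, 127).to(torch.int8)
+    return q, scale
+
+w = torch.randn(128, 64)   # toy linear-layer weight (out_features, in_features)
+x = torch.randn(4, 64)     # activations for 4 tokens
+qw, w_scale = quantize_weight_per_channel(w)
+qx, x_scale = quantize_activation_per_token(x)
+# The INT8 matmul result is rescaled back to floating point:
+y = (qx.float() * x_scale) @ (qw.float() * w_scale).T
+```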
+
+## Deployment
+
+This model can be deployed efficiently using the [vLLM](https://docs.vllm.ai/en/latest/) backend, as shown in the example below.
+
+```python
+from vllm import LLM, SamplingParams
+from transformers import AutoTokenizer
+
+model_id = "neuralmagic/Llama-3.2-1B-Instruct-quantized.w8a8"
+number_gpus = 1
+max_model_len = 8192
+
+sampling_params = SamplingParams(temperature=0.6, top_p=0.9, max_tokens=256)
+
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+
+messages = [
+    {"role": "system", "content": "You are a pirate chatbot who always responds in pirate speak!"},
+    {"role": "user", "content": "Who are you?"},
+]
+
+prompts = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
+
+llm = LLM(model=model_id, tensor_parallel_size=number_gpus, max_model_len=max_model_len)
+
+outputs = llm.generate(prompts, sampling_params)
+
+generated_text = outputs[0].outputs[0].text
+print(generated_text)
+```
+
+vLLM also supports OpenAI-compatible serving. See the [documentation](https://docs.vllm.ai/en/latest/) for more details.
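+
+For example, a server could be launched and queried roughly as follows (the flags shown are illustrative; check the vLLM documentation for the options supported by your version):
+
+```
+vllm serve neuralmagic/Llama-3.2-1B-Instruct-quantized.w8a8 --max-model-len 8192
+
+curl http://localhost:8000/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{
+    "model": "neuralmagic/Llama-3.2-1B-Instruct-quantized.w8a8",
+    "messages": [{"role": "user", "content": "Who are you?"}]
+  }'
+```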
+
+## Creation
+
+This model was created by using the [llm-compressor](https://github.com/vllm-project/llm-compressor) library as presented in the code snippet below.
+
+```python
+from transformers import AutoTokenizer
+from datasets import load_dataset
+from llmcompressor.transformers import SparseAutoModelForCausalLM, oneshot
+from llmcompressor.modifiers.quantization import GPTQModifier, SmoothQuantModifier
+
+model_id = "meta-llama/Llama-3.2-1B-Instruct"
+
+num_samples = 512
+max_seq_len = 8192
+
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+
+def preprocess_fn(example):
+    return {"text": tokenizer.apply_chat_template(example["messages"], add_generation_prompt=False, tokenize=False)}
+
+ds = load_dataset("neuralmagic/LLM_compression_calibration", split="train")
+ds = ds.shuffle().select(range(num_samples))
+ds = ds.map(preprocess_fn)
+
+recipe = [
+    SmoothQuantModifier(
+        smoothing_strength=0.7,
+        mappings=[
+            [["re:.*q_proj", "re:.*k_proj", "re:.*v_proj"], "re:.*input_layernorm"],
+            [["re:.*gate_proj", "re:.*up_proj"], "re:.*post_attention_layernorm"],
+            [["re:.*down_proj"], "re:.*up_proj"],
+        ],
+    ),
+    GPTQModifier(
+        sequential=True,
+        targets="Linear",
+        scheme="W8A8",
+        ignore=["lm_head"],
+        dampening_frac=0.01,
+    )
+]
+
+model = SparseAutoModelForCausalLM.from_pretrained(
+    model_id,
+    device_map="auto",
+)
+
+oneshot(
+    model=model,
+    dataset=ds,
+    recipe=recipe,
+    max_seq_length=max_seq_len,
+    num_calibration_samples=num_samples,
+)
+
+model.save_pretrained("Llama-3.2-1B-Instruct-quantized.w8a8")
+```
+
+
+## Evaluation
+
+The model was evaluated on MMLU, ARC-Challenge, GSM-8K, Hellaswag, Winogrande and TruthfulQA.
+Evaluation was conducted using the Neural Magic fork of [lm-evaluation-harness](https://github.com/neuralmagic/lm-evaluation-harness/tree/llama_3.1_instruct) (branch llama_3.1_instruct) and the [vLLM](https://docs.vllm.ai/en/stable/) engine.
+This version of the lm-evaluation-harness includes versions of MMLU, ARC-Challenge and GSM-8K that match the prompting style of [Meta-Llama-3.1-Instruct-evals](https://huggingface.co/datasets/meta-llama/Meta-Llama-3.1-8B-Instruct-evals).
+
+### Accuracy
+
+#### Open LLM Leaderboard evaluation scores
+
+| Benchmark | Llama-3.2-1B-Instruct | Llama-3.2-1B-Instruct-quantized.w8a8 (this model) | Recovery |
+| --- | --- | --- | --- |
+| MMLU (5-shot) | 47.66 | 47.95 | 100.6% |
+| MMLU (CoT, 0-shot) | 47.10 | 44.63 | 94.8% |
+| ARC Challenge (0-shot) | 58.36 | 56.14 | 96.2% |
+| GSM-8K (CoT, 8-shot, strict-match) | 45.72 | 46.70 | 102.2% |
+| Hellaswag (10-shot) | 61.01 | 60.95 | 99.9% |
+| Winogrande (5-shot) | 62.27 | 61.33 | 98.5% |
+| TruthfulQA (0-shot, mc2) | 43.52 | 42.84 | 98.4% |
+| **Average** | **52.24** | **51.51** | **98.7%** |
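+
+Recovery is the quantized model's score as a percentage of the unquantized model's score; for example, the MMLU (5-shot) recovery is 47.95 / 47.66 ≈ 100.6%.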
+
+### Reproduction
+
+The results were obtained using the following commands:
+
+#### MMLU
+```
+lm_eval \
+  --model vllm \
+  --model_args pretrained="neuralmagic/Llama-3.2-1B-Instruct-quantized.w8a8",dtype=auto,add_bos_token=True,max_model_len=3850,max_gen_toks=10,tensor_parallel_size=1 \
+  --tasks mmlu_llama_3.1_instruct \
+  --fewshot_as_multiturn \
+  --apply_chat_template \
+  --num_fewshot 5 \
+  --batch_size auto
+```
+
+#### MMLU-CoT
+```
+lm_eval \
+  --model vllm \
+  --model_args pretrained="neuralmagic/Llama-3.2-1B-Instruct-quantized.w8a8",dtype=auto,add_bos_token=True,max_model_len=4064,max_gen_toks=1024,tensor_parallel_size=1 \
+  --tasks mmlu_cot_0shot_llama_3.1_instruct \
+  --apply_chat_template \
+  --num_fewshot 0 \
+  --batch_size auto
+```
+
+#### ARC-Challenge
+```
+lm_eval \
+  --model vllm \
+  --model_args pretrained="neuralmagic/Llama-3.2-1B-Instruct-quantized.w8a8",dtype=auto,add_bos_token=True,max_model_len=3940,max_gen_toks=100,tensor_parallel_size=1 \
+  --tasks arc_challenge_llama_3.1_instruct \
+  --apply_chat_template \
+  --num_fewshot 0 \
+  --batch_size auto
+```
+
+#### GSM-8K
+```
+lm_eval \
+  --model vllm \
+  --model_args pretrained="neuralmagic/Llama-3.2-1B-Instruct-quantized.w8a8",dtype=auto,add_bos_token=True,max_model_len=4096,max_gen_toks=1024,tensor_parallel_size=1 \
+  --tasks gsm8k_cot_llama_3.1_instruct \
+  --fewshot_as_multiturn \
+  --apply_chat_template \
+  --num_fewshot 8 \
+  --batch_size auto
+```
+
+#### Hellaswag
+```
+lm_eval \
+  --model vllm \
+  --model_args pretrained="neuralmagic/Llama-3.2-1B-Instruct-quantized.w8a8",dtype=auto,add_bos_token=True,max_model_len=4096,tensor_parallel_size=1 \
+  --tasks hellaswag \
+  --num_fewshot 10 \
+  --batch_size auto
+```
+
+#### Winogrande
+```
+lm_eval \
+  --model vllm \
+  --model_args pretrained="neuralmagic/Llama-3.2-1B-Instruct-quantized.w8a8",dtype=auto,add_bos_token=True,max_model_len=4096,tensor_parallel_size=1 \
+  --tasks winogrande \
+  --num_fewshot 5 \
+  --batch_size auto
+```
+
+#### TruthfulQA
+```
+lm_eval \
+  --model vllm \
+  --model_args pretrained="neuralmagic/Llama-3.2-1B-Instruct-quantized.w8a8",dtype=auto,add_bos_token=True,max_model_len=4096,tensor_parallel_size=1 \
+  --tasks truthfulqa \
+  --num_fewshot 0 \
+  --batch_size auto
+```
\ No newline at end of file
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..91adfb3
--- /dev/null
+++ b/config.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3036fad522e142db0a0484a8b5258ac0fdcf676985f77f90701a31add23427af
+size 2032
diff --git a/configuration.json b/configuration.json
new file mode 100644
index 0000000..bbeeda1
--- /dev/null
+++ b/configuration.json
@@ -0,0 +1 @@
+{"framework": "pytorch", "task": "text-generation", "allow_remote": true}
\ No newline at end of file
diff --git a/generation_config.json b/generation_config.json
new file mode 100644
index 0000000..9987b58
--- /dev/null
+++ b/generation_config.json
@@ -0,0 +1,12 @@
+{
+  "bos_token_id": 128000,
+  "do_sample": true,
+  "eos_token_id": [
+    128001,
+    128008,
+    128009
+  ],
+  "temperature": 0.6,
+  "top_p": 0.9,
+  "transformers_version": "4.44.1"
+}
diff --git a/model.safetensors b/model.safetensors
new file mode 100644
index 0000000..7bdd038
--- /dev/null
+++ b/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb63ec84f5b37b22601df876df8a0871c85214189988e9f568ffa4e4cd57c147
+size 2024670536
diff --git a/recipe.yaml b/recipe.yaml
new file mode 100644
index 0000000..e57d953
--- /dev/null
+++ b/recipe.yaml
@@ -0,0 +1,18 @@
+quant_stage:
+  quant_modifiers:
+    SmoothQuantModifier:
+      smoothing_strength: 0.7
+      mappings:
+      - - ['re:.*q_proj', 're:.*k_proj', 're:.*v_proj']
+        - re:.*input_layernorm
+      - - ['re:.*gate_proj', 're:.*up_proj']
+        - re:.*post_attention_layernorm
+      - - ['re:.*down_proj']
+        - re:.*up_proj
+    GPTQModifier:
+      sequential_update: true
+      dampening_frac: 0.01
+      ignore: [lm_head]
+      scheme: W8A8
+      targets: [Linear]
+      observer: mse
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000..b43be96
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1,17 @@
+{
+  "bos_token": {
+    "content": "<|begin_of_text|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|eot_id|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<|eot_id|>"
+}
diff --git a/tokenizer.json b/tokenizer.json
new file mode 100644
index 0000000..aa23935
--- /dev/null
+++ b/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4745787bf5429f4558dbadb95086d68ccc290ca1fac62bdb3d05c233fab5bc40
+size 9085756
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000..af04c21
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df757249013ee916c1318f170d2763cc2272d10d66c5d73a792ce34c2bd8cbb6
+size 54557