From 3ec05c84db9b6460c282747380f2dfa163b95126 Mon Sep 17 00:00:00 2001
From: ModelHub XC
Date: Sat, 11 Apr 2026 22:04:55 +0800
Subject: [PATCH] Initialize project; model provided by the ModelHub XC community
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Model: RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16
Source: Original Platform
---
 .gitattributes          |  35 ++
 README.md               | 805 ++++++++++++++++++++++++++++++++++++++++
 config.json             |   3 +
 configuration.json      |   1 +
 model.safetensors       |   3 +
 quantize_config.json    |  13 +
 special_tokens_map.json |  16 +
 tokenizer.json          |   3 +
 tokenizer_config.json   |   3 +
 9 files changed, 882 insertions(+)
 create mode 100644 .gitattributes
 create mode 100644 README.md
 create mode 100644 config.json
 create mode 100644 configuration.json
 create mode 100644 model.safetensors
 create mode 100644 quantize_config.json
 create mode 100644 special_tokens_map.json
 create mode 100644 tokenizer.json
 create mode 100644 tokenizer_config.json

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..a6344aa
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..6eea6a5
--- /dev/null
+++ b/README.md
@@ -0,0 +1,805 @@
+---
+language:
+- en
+- de
+- fr
+- it
+- pt
+- hi
+- es
+- th
+base_model:
+- meta-llama/Llama-3.1-8B-Instruct
+pipeline_tag: text-generation
+tags:
+- llama
+- facebook
+- meta
+- llama-3
+- int4
+- vllm
+- chat
+- neuralmagic
+- llmcompressor
+- conversational
+- 4-bit precision
+- gptq
+- compressed-tensors
+license: llama3.1
+license_name: llama3.1
+name: RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16
+description: This model was obtained by quantizing the weights of Meta-Llama-3.1-8B-Instruct to INT4 data type.
+readme: https://huggingface.co/RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16/blob/main/README.md
+tasks:
+- text-to-text
+provider: Meta
+license_link: https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE
+validated_on:
+  - RHOAI 2.20
+  - RHAIIS 3.0
+  - RHELAI 1.5
+---

+# Meta-Llama-3.1-8B-Instruct-quantized.w4a16

+## Model Overview
+- **Model Architecture:** Meta-Llama-3
+  - **Input:** Text
+  - **Output:** Text
+- **Model Optimizations:**
+  - **Weight quantization:** INT4
+- **Intended Use Cases:** Intended for commercial and research use in English. Similarly to [Meta-Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct), this model is intended for assistant-like chat.
+- **Out-of-scope:** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English.
+- **Release Date:** 7/26/2024
+- **Version:** 1.0
+- **Validated on:** RHOAI 2.20, RHAIIS 3.0, RHELAI 1.5
+- **License(s):** Llama3.1
+- **Model Developers:** Neural Magic
+
+This model is a quantized version of [Meta-Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct).
+It was evaluated on several tasks to assess its quality in comparison to the unquantized model, including multiple-choice, math reasoning, and open-ended text generation.
+Meta-Llama-3.1-8B-Instruct-quantized.w4a16 achieves 93.0% recovery for the Arena-Hard evaluation, 98.9% for OpenLLM v1 (using Meta's prompting when available), 96.1% for OpenLLM v2, 99.7% for HumanEval pass@1, and 97.4% for HumanEval+ pass@1.
+
+### Model Optimizations
+
+This model was obtained by quantizing the weights of [Meta-Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct) to the INT4 data type.
+This optimization reduces the number of bits per parameter from 16 to 4, reducing the disk size and GPU memory requirements by approximately 75%.
+
+Only the weights of the linear operators within transformer blocks are quantized.
+Symmetric per-group quantization is applied, in which a linear scaling per group of 128 parameters maps the INT4 and floating-point representations of the quantized weights.
+[AutoGPTQ](https://github.com/AutoGPTQ/AutoGPTQ) is used for quantization with a 10% damping factor and 768 sequences taken from Neural Magic's [LLM compression calibration dataset](https://huggingface.co/datasets/neuralmagic/LLM_compression_calibration).
+
+## Deployment
+
+This model can be deployed efficiently using the [vLLM](https://docs.vllm.ai/en/latest/) backend, as shown in the example below.
+
+```python
+from vllm import LLM, SamplingParams
+from transformers import AutoTokenizer
+
+model_id = "RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16"
+number_gpus = 1
+max_model_len = 8192
+
+sampling_params = SamplingParams(temperature=0.6, top_p=0.9, max_tokens=256)
+
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+
+messages = [
+    {"role": "system", "content": "You are a pirate chatbot who always responds in pirate speak!"},
+    {"role": "user", "content": "Who are you?"},
+]
+
+prompts = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
+
+llm = LLM(model=model_id, tensor_parallel_size=number_gpus, max_model_len=max_model_len)
+
+outputs = llm.generate(prompts, sampling_params)
+
+generated_text = outputs[0].outputs[0].text
+print(generated_text)
+```
+
+vLLM also supports OpenAI-compatible serving. See the [documentation](https://docs.vllm.ai/en/latest/) for more details.
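+
+As a minimal sketch of that serving mode: start a server with `vllm serve RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16` and query it with any OpenAI client. The host, port, and placeholder API key below are assumptions (vLLM's defaults at the time of writing); adjust them to your deployment.
+
+```python
+# Minimal sketch: query a vLLM OpenAI-compatible server.
+# Assumes `vllm serve RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16`
+# is listening on localhost:8000 (the vLLM default); adjust base_url if not.
+from openai import OpenAI
+
+client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")  # key is unused unless the server enforces one
+
+response = client.chat.completions.create(
+    model="RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",
+    messages=[
+        {"role": "system", "content": "You are a pirate chatbot who always responds in pirate speak!"},
+        {"role": "user", "content": "Who are you?"},
+    ],
+    temperature=0.6,
+    max_tokens=256,
+)
+print(response.choices[0].message.content)
+```
+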
+### Deploy on Red Hat AI Inference Server
+
+```bash
+podman run --rm -it --device nvidia.com/gpu=all -p 8000:8000 \
+  --ipc=host \
+  --env "HUGGING_FACE_HUB_TOKEN=$HF_TOKEN" \
+  --env "HF_HUB_OFFLINE=0" \
+  -v ~/.cache/vllm:/home/vllm/.cache \
+  --name=vllm \
+  registry.access.redhat.com/rhaiis/rh-vllm-cuda \
+  vllm serve \
+  --tensor-parallel-size 8 \
+  --max-model-len 32768 \
+  --enforce-eager --model RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16
+```
+
+See [Red Hat AI Inference Server documentation](https://docs.redhat.com/en/documentation/red_hat_ai_inference_server/) for more details.
+ +
+### Deploy on Red Hat Enterprise Linux AI
+
+```bash
+# Download model from Red Hat Registry via docker
+# Note: This downloads the model to ~/.cache/instructlab/models unless --model-dir is specified.
+ilab model download --repository docker://registry.redhat.io/rhelai1/llama-3-1-8b-instruct-quantized-w4a16:1.5
+```
+
+```bash
+# Serve model via ilab
+ilab model serve --model-path ~/.cache/instructlab/models/llama-3-1-8b-instruct-quantized-w4a16
+
+# Chat with model
+ilab model chat --model ~/.cache/instructlab/models/llama-3-1-8b-instruct-quantized-w4a16
+```
+
+See [Red Hat Enterprise Linux AI documentation](https://docs.redhat.com/en/documentation/red_hat_enterprise_linux_ai/1.4) for more details.
+ +
+### Deploy on Red Hat OpenShift AI
+
+```yaml
+# Setting up vllm server with ServingRuntime
+# Save as: vllm-servingruntime.yaml
+apiVersion: serving.kserve.io/v1alpha1
+kind: ServingRuntime
+metadata:
+  name: vllm-cuda-runtime # OPTIONAL CHANGE: set a unique name
+  annotations:
+    openshift.io/display-name: vLLM NVIDIA GPU ServingRuntime for KServe
+    opendatahub.io/recommended-accelerators: '["nvidia.com/gpu"]'
+  labels:
+    opendatahub.io/dashboard: 'true'
+spec:
+  annotations:
+    prometheus.io/port: '8080'
+    prometheus.io/path: '/metrics'
+  multiModel: false
+  supportedModelFormats:
+    - autoSelect: true
+      name: vLLM
+  containers:
+    - name: kserve-container
+      image: quay.io/modh/vllm:rhoai-2.20-cuda # CHANGE if needed. If AMD: quay.io/modh/vllm:rhoai-2.20-rocm
+      command:
+        - python
+        - -m
+        - vllm.entrypoints.openai.api_server
+      args:
+        - "--port=8080"
+        - "--model=/mnt/models"
+        - "--served-model-name={{.Name}}"
+      env:
+        - name: HF_HOME
+          value: /tmp/hf_home
+      ports:
+        - containerPort: 8080
+          protocol: TCP
+```
+
+```yaml
+# Attach model to vllm server. This is an NVIDIA template
+# Save as: inferenceservice.yaml
+apiVersion: serving.kserve.io/v1beta1
+kind: InferenceService
+metadata:
+  annotations:
+    openshift.io/display-name: llama-3-1-8b-instruct-quantized-w4a16 # OPTIONAL CHANGE
+    serving.kserve.io/deploymentMode: RawDeployment
+  name: llama-3-1-8b-instruct-quantized-w4a16 # specify model name. This value will be used to invoke the model in the payload
+  labels:
+    opendatahub.io/dashboard: 'true'
+spec:
+  predictor:
+    maxReplicas: 1
+    minReplicas: 1
+    model:
+      modelFormat:
+        name: vLLM
+      name: ''
+      resources:
+        limits:
+          cpu: '2'            # this is model specific
+          memory: 8Gi         # this is model specific
+          nvidia.com/gpu: '1' # this is accelerator specific
+        requests:             # same comment for this block
+          cpu: '1'
+          memory: 4Gi
+          nvidia.com/gpu: '1'
+      runtime: vllm-cuda-runtime # must match the ServingRuntime name above
+      storageUri: oci://registry.redhat.io/rhelai1/modelcar-llama-3-1-8b-instruct-quantized-w4a16:1.5
+    tolerations:
+      - effect: NoSchedule
+        key: nvidia.com/gpu
+        operator: Exists
+```
+
+```bash
+# make sure first to be in the project where you want to deploy the model
+# oc project <project-name>
+
+# apply both resources to run model
+
+# Apply the ServingRuntime
+oc apply -f vllm-servingruntime.yaml
+
+# Apply the InferenceService
+oc apply -f inferenceservice.yaml
+```
+
+```bash
+# Replace <inference-service-name> and <cluster-ingress-domain> below:
+# - Run `oc get inferenceservice` to find your URL if unsure.
+
+# Call the server using curl:
+curl https://<inference-service-name>-predictor-default.<cluster-ingress-domain>/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{
+    "model": "llama-3-1-8b-instruct-quantized-w4a16",
+    "stream": true,
+    "stream_options": {
+      "include_usage": true
+    },
+    "max_tokens": 1,
+    "messages": [
+      {
+        "role": "user",
+        "content": "How can a bee fly when its wings are so small?"
+      }
+    ]
+  }'
+```
+
+See [Red Hat OpenShift AI documentation](https://docs.redhat.com/en/documentation/red_hat_openshift_ai/2025) for more details.
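+
+All of the deployment paths above serve the same INT4 checkpoint. To make the optimization concrete before the creation details below, here is a minimal, self-contained sketch of the symmetric per-group INT4 scheme described under Model Optimizations. It illustrates the arithmetic only, not Neural Magic's implementation; the function name and synthetic weights are ours.
+
+```python
+import torch
+
+def quantize_w4a16_groupwise(w: torch.Tensor, group_size: int = 128):
+    """Illustrative symmetric per-group INT4 quantization (not the production code path)."""
+    groups = w.reshape(-1, group_size)
+    # One linear scale per group of 128 parameters maps floats onto the INT4 grid
+    scales = groups.abs().amax(dim=1, keepdim=True) / 7.0
+    q = torch.clamp(torch.round(groups / scales), min=-8, max=7)
+    return q.to(torch.int8), scales  # INT4 values held in an int8 container
+
+w = torch.randn(4096, 4096)
+q, scales = quantize_w4a16_groupwise(w)
+w_hat = (q.float() * scales).reshape(w.shape)  # dequantized "a16" view of the weights
+print(f"mean absolute quantization error: {(w - w_hat).abs().mean().item():.5f}")
+```
+
+At inference time, the serving engine keeps only the 4-bit integers plus one scale per group of 128 weights and dequantizes on the fly, which is where the roughly 75% reduction in weight memory comes from.
+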
+## Creation
+
+This model was created by applying the [AutoGPTQ](https://github.com/AutoGPTQ/AutoGPTQ) library as presented in the code snippet below.
+Although AutoGPTQ was used for this particular model, Neural Magic is transitioning to using [llm-compressor](https://github.com/vllm-project/llm-compressor), which supports several quantization schemes and models not supported by AutoGPTQ.
+
+```python
+from transformers import AutoTokenizer
+from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
+from datasets import load_dataset
+
+model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
+
+num_samples = 756
+max_seq_len = 4064
+
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+
+# Render each calibration example with the model's chat template
+def preprocess_fn(example):
+    return {"text": tokenizer.apply_chat_template(example["messages"], add_generation_prompt=False, tokenize=False)}
+
+ds = load_dataset("neuralmagic/LLM_compression_calibration", split="train")
+ds = ds.shuffle().select(range(num_samples))
+ds = ds.map(preprocess_fn)
+
+# Tokenize the calibration set (truncated to max_seq_len)
+examples = [tokenizer(example["text"], padding=False, max_length=max_seq_len, truncation=True) for example in ds]
+
+quantize_config = BaseQuantizeConfig(
+    bits=4,                       # INT4 weights
+    group_size=128,               # one scale per group of 128 parameters
+    desc_act=True,
+    model_file_base_name="model",
+    damp_percent=0.1,             # 10% damping factor
+)
+
+model = AutoGPTQForCausalLM.from_pretrained(
+    model_id,
+    quantize_config,
+    device_map="auto",
+)
+
+model.quantize(examples)
+model.save_pretrained("Meta-Llama-3.1-8B-Instruct-quantized.w4a16")
+```
+
+## Evaluation
+
+This model was evaluated on the well-known Arena-Hard, OpenLLM v1, OpenLLM v2, HumanEval, and HumanEval+ benchmarks.
+In all cases, model outputs were generated with the [vLLM](https://docs.vllm.ai/en/stable/) engine.
+
+Arena-Hard evaluations were conducted using the [Arena-Hard-Auto](https://github.com/lmarena/arena-hard-auto) repository.
+The model generated a single answer for each prompt from Arena-Hard, and each answer was judged twice by GPT-4.
+We report below the scores obtained in each judgement and the average.
+
+OpenLLM v1 and v2 evaluations were conducted using Neural Magic's fork of [lm-evaluation-harness](https://github.com/neuralmagic/lm-evaluation-harness/tree/llama_3.1_instruct) (branch llama_3.1_instruct).
+This version of the lm-evaluation-harness includes versions of MMLU, ARC-Challenge, and GSM-8K that match the prompting style of [Meta-Llama-3.1-Instruct-evals](https://huggingface.co/datasets/meta-llama/Meta-Llama-3.1-8B-Instruct-evals) and a few fixes to OpenLLM v2 tasks.
+
+HumanEval and HumanEval+ evaluations were conducted using Neural Magic's fork of the [EvalPlus](https://github.com/neuralmagic/evalplus) repository.
+
+Detailed model outputs are available as HuggingFace datasets for [Arena-Hard](https://huggingface.co/datasets/neuralmagic/quantized-llama-3.1-arena-hard-evals), [OpenLLM v2](https://huggingface.co/datasets/neuralmagic/quantized-llama-3.1-leaderboard-v2-evals), and [HumanEval](https://huggingface.co/datasets/neuralmagic/quantized-llama-3.1-humaneval-evals).
+
+**Note:** Results have been updated after Meta modified the chat template.
+
+### Accuracy
+
+In the table below, recovery is the quantized model's score expressed as a percentage of the unquantized baseline's score.
+
+| Category | Benchmark | Meta-Llama-3.1-8B-Instruct | Meta-Llama-3.1-8B-Instruct-quantized.w4a16 (this model) | Recovery |
+|---|---|---|---|---|
+| LLM as a judge | Arena Hard | 25.8 (25.1 / 26.5) | 27.2 (27.6 / 26.7) | 105.4% |
+| OpenLLM v1 | MMLU (5-shot) | 68.3 | 66.9 | 97.9% |
+| | MMLU (CoT, 0-shot) | 72.8 | 71.1 | 97.6% |
+| | ARC Challenge (0-shot) | 81.4 | 80.2 | 98.0% |
+| | GSM-8K (CoT, 8-shot, strict-match) | 82.8 | 82.9 | 100.2% |
+| | Hellaswag (10-shot) | 80.5 | 79.9 | 99.3% |
+| | Winogrande (5-shot) | 78.1 | 78.0 | 99.9% |
+| | TruthfulQA (0-shot, mc2) | 54.5 | 52.8 | 96.9% |
+| | **Average** | **74.3** | **73.5** | **98.9%** |
+| OpenLLM v2 | MMLU-Pro (5-shot) | 30.8 | 28.8 | 93.6% |
+| | IFEval (0-shot) | 77.9 | 76.3 | 98.0% |
+| | BBH (3-shot) | 30.1 | 28.9 | 96.1% |
+| | Math-lvl-5 (4-shot) | 15.7 | 14.8 | 94.4% |
+| | GPQA (0-shot) | 3.7 | 4.0 | 109.8% |
+| | MuSR (0-shot) | 7.6 | 6.3 | 83.2% |
+| | **Average** | **27.6** | **26.5** | **96.1%** |
+| Coding | HumanEval pass@1 | 67.3 | 67.1 | 99.7% |
+| | HumanEval+ pass@1 | 60.7 | 59.1 | 97.4% |
+| Multilingual | Portuguese MMLU (5-shot) | 59.96 | 58.69 | 97.9% |
+| | Spanish MMLU (5-shot) | 60.25 | 58.39 | 96.9% |
+| | Italian MMLU (5-shot) | 59.23 | 57.82 | 97.6% |
+| | German MMLU (5-shot) | 58.63 | 56.22 | 95.9% |
+| | French MMLU (5-shot) | 59.65 | 57.58 | 96.5% |
+| | Hindi MMLU (5-shot) | 50.10 | 47.14 | 94.1% |
+| | Thai MMLU (5-shot) | 49.12 | 46.72 | 95.1% |
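+
+As a quick sanity check, the recovery column can be recomputed from the two score columns. The published figures were computed from unrounded scores, so a recomputed value may differ in the last digit; for example:
+
+```python
+# Recovery = quantized score / baseline score, as a percentage.
+def recovery(baseline: float, quantized: float) -> str:
+    return f"{100 * quantized / baseline:.1f}%"
+
+print(recovery(80.5, 79.9))  # Hellaswag (10-shot)  -> 99.3%
+print(recovery(54.5, 52.8))  # TruthfulQA (0-shot)  -> 96.9%
+```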
+ + +### Reproduction + +The results were obtained using the following commands: + +#### MMLU +``` +lm_eval \ + --model vllm \ + --model_args pretrained="RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",dtype=auto,max_model_len=3850,max_gen_toks=10,tensor_parallel_size=1 \ + --tasks mmlu_llama_3.1_instruct \ + --fewshot_as_multiturn \ + --apply_chat_template \ + --num_fewshot 5 \ + --batch_size auto +``` + +#### MMLU-CoT +``` +lm_eval \ + --model vllm \ + --model_args pretrained="RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",dtype=auto,max_model_len=4064,max_gen_toks=1024,tensor_parallel_size=1 \ + --tasks mmlu_cot_0shot_llama_3.1_instruct \ + --apply_chat_template \ + --num_fewshot 0 \ + --batch_size auto +``` + +#### ARC-Challenge +``` +lm_eval \ + --model vllm \ + --model_args pretrained="RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",dtype=auto,max_model_len=3940,max_gen_toks=100,tensor_parallel_size=1 \ + --tasks arc_challenge_llama_3.1_instruct \ + --apply_chat_template \ + --num_fewshot 0 \ + --batch_size auto +``` + +#### GSM-8K +``` +lm_eval \ + --model vllm \ + --model_args pretrained="RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",dtype=auto,max_model_len=4096,max_gen_toks=1024,tensor_parallel_size=1 \ + --tasks gsm8k_cot_llama_3.1_instruct \ + --fewshot_as_multiturn \ + --apply_chat_template \ + --num_fewshot 8 \ + --batch_size auto +``` + +#### Hellaswag +``` +lm_eval \ + --model vllm \ + --model_args pretrained="RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",dtype=auto,add_bos_token=True,max_model_len=4096,tensor_parallel_size=1 \ + --tasks hellaswag \ + --num_fewshot 10 \ + --batch_size auto +``` + +#### Winogrande +``` +lm_eval \ + --model vllm \ + --model_args pretrained="RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",dtype=auto,add_bos_token=True,max_model_len=4096,tensor_parallel_size=1 \ + --tasks winogrande \ + --num_fewshot 5 \ + --batch_size auto +``` + +#### TruthfulQA +``` +lm_eval \ + --model vllm \ + --model_args pretrained="RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",dtype=auto,add_bos_token=True,max_model_len=4096,tensor_parallel_size=1 \ + --tasks truthfulqa \ + --num_fewshot 0 \ + --batch_size auto +``` + +#### OpenLLM v2 +``` +lm_eval \ + --model vllm \ + --model_args pretrained="RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",dtype=auto,max_model_len=4096,tensor_parallel_size=1,enable_chunked_prefill=True \ + --apply_chat_template \ + --fewshot_as_multiturn \ + --tasks leaderboard \ + --batch_size auto +``` + +#### MMLU Portuguese +``` +lm_eval \ + --model vllm \ + --model_args pretrained="RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",dtype=auto,max_model_len=3850,max_gen_toks=10,tensor_parallel_size=1 \ + --tasks mmlu_pt_llama_3.1_instruct \ + --fewshot_as_multiturn \ + --apply_chat_template \ + --num_fewshot 5 \ + --batch_size auto +``` + +#### MMLU Spanish +``` +lm_eval \ + --model vllm \ + --model_args pretrained="RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",dtype=auto,max_model_len=3850,max_gen_toks=10,tensor_parallel_size=1 \ + --tasks mmlu_es_llama_3.1_instruct \ + --fewshot_as_multiturn \ + --apply_chat_template \ + --num_fewshot 5 \ + --batch_size auto +``` + +#### MMLU Italian +``` +lm_eval \ + --model vllm \ + --model_args pretrained="RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",dtype=auto,max_model_len=3850,max_gen_toks=10,tensor_parallel_size=1 \ + --tasks mmlu_it_llama_3.1_instruct \ + --fewshot_as_multiturn \ + --apply_chat_template \ + --num_fewshot 5 \ + --batch_size 
auto +``` + +#### MMLU German +``` +lm_eval \ + --model vllm \ + --model_args pretrained="RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",dtype=auto,max_model_len=3850,max_gen_toks=10,tensor_parallel_size=1 \ + --tasks mmlu_de_llama_3.1_instruct \ + --fewshot_as_multiturn \ + --apply_chat_template \ + --num_fewshot 5 \ + --batch_size auto +``` + +#### MMLU French +``` +lm_eval \ + --model vllm \ + --model_args pretrained="RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",dtype=auto,max_model_len=3850,max_gen_toks=10,tensor_parallel_size=1 \ + --tasks mmlu_fr_llama_3.1_instruct \ + --fewshot_as_multiturn \ + --apply_chat_template \ + --num_fewshot 5 \ + --batch_size auto +``` + +#### MMLU Hindi +``` +lm_eval \ + --model vllm \ + --model_args pretrained="RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",dtype=auto,max_model_len=3850,max_gen_toks=10,tensor_parallel_size=1 \ + --tasks mmlu_hi_llama_3.1_instruct \ + --fewshot_as_multiturn \ + --apply_chat_template \ + --num_fewshot 5 \ + --batch_size auto +``` + +#### MMLU Thai +``` +lm_eval \ + --model vllm \ + --model_args pretrained="RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",dtype=auto,max_model_len=3850,max_gen_toks=10,tensor_parallel_size=1 \ + --tasks mmlu_th_llama_3.1_instruct \ + --fewshot_as_multiturn \ + --apply_chat_template \ + --num_fewshot 5 \ + --batch_size auto +``` + +#### HumanEval and HumanEval+ +##### Generation +``` +python3 codegen/generate.py \ + --model RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w4a16 \ + --bs 16 \ + --temperature 0.2 \ + --n_samples 50 \ + --root "." \ + --dataset humaneval +``` +##### Sanitization +``` +python3 evalplus/sanitize.py \ + humaneval/neuralmagic--Meta-Llama-3.1-8B-Instruct-quantized.w4a16_vllm_temp_0.2 +``` +##### Evaluation +``` +evalplus.evaluate \ + --dataset humaneval \ + --samples humaneval/neuralmagic--Meta-Llama-3.1-8B-Instruct-quantized.w4a16_vllm_temp_0.2-sanitized +``` diff --git a/config.json b/config.json new file mode 100644 index 0000000..c6b0a94 --- /dev/null +++ b/config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2102f4f11a02b800b914eab88e466ee8f89e1411d23e2c883c5dd45c143e00ae +size 1259 diff --git a/configuration.json b/configuration.json new file mode 100644 index 0000000..bbeeda1 --- /dev/null +++ b/configuration.json @@ -0,0 +1 @@ +{"framework": "pytorch", "task": "text-generation", "allow_remote": true} \ No newline at end of file diff --git a/model.safetensors b/model.safetensors new file mode 100644 index 0000000..943f7cf --- /dev/null +++ b/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b65a3d34b7b4d350f0cbee60f4e690d1eee1411e7f484db0a8cfae081458602 +size 5735720552 diff --git a/quantize_config.json b/quantize_config.json new file mode 100644 index 0000000..b06715d --- /dev/null +++ b/quantize_config.json @@ -0,0 +1,13 @@ +{ + "bits": 4, + "group_size": 128, + "damp_percent": 0.1, + "desc_act": true, + "static_groups": false, + "sym": true, + "true_sequential": true, + "model_name_or_path": null, + "model_file_base_name": "model", + "is_marlin_format": false, + "quant_method": "gptq" +} \ No newline at end of file diff --git a/special_tokens_map.json b/special_tokens_map.json new file mode 100644 index 0000000..02ee80b --- /dev/null +++ b/special_tokens_map.json @@ -0,0 +1,16 @@ +{ + "bos_token": { + "content": "<|begin_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "<|eot_id|>", + 
"lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } +} diff --git a/tokenizer.json b/tokenizer.json new file mode 100644 index 0000000..66cd9d7 --- /dev/null +++ b/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79e3e522635f3171300913bb421464a87de6222182a0570b9b2ccba2a964b2b4 +size 9085657 diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000..9228e5c --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:177c7b61e616fecb84c17ce0591acb92c6c4d60e9ac5ababfb940ff23bbcd424 +size 55351