From 9b2a039ab434678664d223e0bf2d8df0f0b57705 Mon Sep 17 00:00:00 2001
From: Almeida
Date: Wed, 27 Sep 2023 17:06:09 +0000
Subject: [PATCH] Upload folder using huggingface_hub (#1)

- Upload folder using huggingface_hub (13f22f09c60b6f1bd394e97fcdebb32edbf3ddf6)

---
 .gitattributes       |   3 +
 README.md            | 150 ++++++++++++++++++++++++++++++++++++++++++-
 ggml-model-Q4_0.gguf |   3 +
 ggml-model-Q8_0.gguf |   3 +
 ggml-model-f16.gguf  |   3 +
 5 files changed, 161 insertions(+), 1 deletion(-)
 create mode 100644 ggml-model-Q4_0.gguf
 create mode 100644 ggml-model-Q8_0.gguf
 create mode 100644 ggml-model-f16.gguf

diff --git a/.gitattributes b/.gitattributes
index a6344aa..7968f77 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ggml-model-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ggml-model-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+ggml-model-f16.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
index 2b16176..14cbf25 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,151 @@
 ---
-license: cc-by-3.0
+license: cc-by-sa-3.0
+datasets:
+- VMware/open-instruct
+language:
+- en
+library_name: transformers
+pipeline_tag: text-generation
 ---
+
+# Open LLama 7B v2 Open Instruct
+- Model creator: [VMware](https://huggingface.co/VMware)
+- Original model: [VMware/open-llama-7b-v2-open-instruct](https://huggingface.co/VMware/open-llama-7b-v2-open-instruct)
+
+## Description
+
+This repo contains the GGUF model files for [Open LLama 7B v2 Open Instruct](https://huggingface.co/VMware/open-llama-7b-v2-open-instruct).
+
+These files are compatible with [llama.cpp](https://github.com/ggerganov/llama.cpp); see the *Use with llama.cpp* section below for a usage sketch.
+
+# VMware/open-llama-7B-v2-open-instruct
+Instruction-tuned version of the fully trained Open LLama 7B v2 model. The model is open for COMMERCIAL USE.<br>
+
+- This model performs better on code than v1, owing to the improvements the openlm-research team made to the base model.
+- The instruction model is trained on an improved instruction-tuning dataset compared to v1.
+
+**NOTE**: The model was trained using the Alpaca prompt template, shown below.<br>
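+
+The template (the same string passed as `prompt_template` in the *Use in Transformers* example later in this card):
+
+```
+Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction:
+{instruction}
+
+### Response:
+```
+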
+**NOTE**: The fast tokenizer produces incorrect encodings; set `use_fast=False` when instantiating the tokenizer.
+
+
+## License
+- CC BY-SA-3.0 **(Commercially Viable!)**
+- Base Language Model ([openlm-research/open_llama_v2_7b](https://huggingface.co/openlm-research/open_llama_v2_7b)) is under apache-2.0
+- Fine-Tuning Dataset ([VMware/open-instruct](https://huggingface.co/datasets/VMware/open-instruct)) is under cc-by-sa-3.0
+
+## Datasets used for Fine-Tuning
+
+### Open-instruct
+
+**Open-instruct-v1**
+- Mosaic/Dolly-HHRLHF + filtered OASST1 - cc-by-3.0
+
+**Subset of COT SUBMIX (from FLAN v2), zero-shot examples**
+- ESNLI - MIT
+- ECQA - CDLA-Sharing 1.0
+- StrategyQA - MIT
+- CREAK - MIT
+- GSM8K - MIT
+- AQuA - MIT
+- QASC - Apache 2.0
+
+
+## Nomenclature
+
+- Model: Open-llama-v2
+- Model Size: 7B parameters
+- Dataset: Open-instruct
+
+
+## Use in Transformers
+
+```python
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model_name = 'VMware/open-llama-7b-v2-open-instruct'
+
+tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
+model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map='sequential')
+
+prompt_template = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:"
+
+prompt = """What is the attention mechanism of a transformer model?
+Write Python code to illustrate how attention works within a transformer model using the numpy library. Do not use pytorch or tensorflow."""
+
+input_text = prompt_template.format(instruction=prompt)
+input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
+
+# Generate, then drop the prompt tokens so that only the model's reply is decoded.
+output_ids = model.generate(input_ids, max_length=512)
+output_ids = output_ids[:, input_ids.shape[1]:]
+output = tokenizer.decode(output_ids[0])
+
+print(output)
+```
+
+
+### Output
+
+Sure, I can help you with that!
+
+Attention mechanisms in transformer models are typically implemented using the attention mechanism in the self-attention layer. Self-attention allows the model to focus on different parts of the input sequence when processing it. This is achieved by computing a set of attention weights, which are used to weigh the contribution of each input element to the output.
+
+Here's an example code using NumPy to illustrate how attention works in a transformer model:
+
+```python
+import numpy as np
+
+def attention_weights(query, key, value, mask):
+    # Query, key, and value are input tensors. Mask is a tensor of zeros and ones that represents the attention mask.
+    # It is used to prevent the model from attending to certain positions in the input sequence if they are not relevant.
+    # The attention weights are the element-wise product of the query, key, and mask tensors.
+    # The result is a tensor of the same shape as the query tensor.
+ + # Compute the dot product between the query tensor and the key tensor + dot = np.matmul(query, key) + + # Compute the element-wise softmax of the dot product tensor + exp_dot = np.exp(dot) + + # Multiply the dot product and the softmax of the dot product tensors + weights = dot * exp_dot + + # Return the attention weights as a NumPy tensor + return weights + +# Define the input sequence +query = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]) +key = np.array([[0.1, 0.2], [0.3, 0.4]]) +value = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]) +mask = np.array([[False, True, True], [False, True, True]]) + +# Compute the attention weights +weights = attention_weights(query, key, value, mask) + +# Print the attention weights +print(weights) +``` + +In this example, the `attention_weights` function takes as input the query tensor, key tensor, value tensor, and mask tensor. It computes the dot product between the query and key tensors using the `np.matmul` function, and then applies a softmax function using the `np.exp` function to the element-wise dot product tensor. It then multiplies the dot product and softmax tensors using the `np.matmul` function, and returns the result as a NumPy tensor. + +The `query`, `key`, and `value` tensors represent the input sequence to the transformer model. The `mask` tensor represents the attention mask, which is used to prevent the model from attending to certain positions in the input sequence if they are not relevant. + +The output of the `attention_weights` function is a NumPy tensor that represents the attention weights for the input sequence. These weights are used by the transformer model to weigh the contribution of each input element to the output. + +I hope this helps! +
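+
+For reference, the generated snippet above is illustrative rather than a working implementation: it never applies a true softmax and ignores its `value` and `mask` arguments. A conventional scaled dot-product attention can be sketched in NumPy as follows (a minimal editorial sketch, not part of the model's output):
+
+```python
+import numpy as np
+
+def scaled_dot_product_attention(query, key, value, mask=None):
+    # Similarity scores between each query row and each key row,
+    # scaled by sqrt(d_k) to keep the softmax well-behaved.
+    d_k = query.shape[-1]
+    scores = query @ key.T / np.sqrt(d_k)
+
+    # Optionally hide masked-out positions (mask == False) before the softmax.
+    if mask is not None:
+        scores = np.where(mask, scores, -1e9)
+
+    # Numerically stable softmax over the key dimension.
+    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
+    weights = weights / weights.sum(axis=-1, keepdims=True)
+
+    # Each output row is a weighted average of the value rows.
+    return weights @ value
+
+query = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
+key = np.array([[0.2, 0.1, 0.0], [0.3, 0.4, 0.5]])
+value = np.array([[1.0, 0.0], [0.0, 1.0]])
+print(scaled_dot_product_attention(query, key, value))
+```
+
+## Use with llama.cpp
+
+The GGUF files in this repo can be loaded by any llama.cpp-compatible runtime. As a minimal sketch, assuming the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) bindings are installed and `ggml-model-Q4_0.gguf` has been downloaded locally:
+
+```python
+from llama_cpp import Llama
+
+# The path is an assumption: point model_path at the downloaded GGUF file.
+llm = Llama(model_path="ggml-model-Q4_0.gguf")
+
+# The Alpaca prompt template the model was trained with.
+prompt_template = (
+    "Below is an instruction that describes a task. "
+    "Write a response that appropriately completes the request.\n\n"
+    "### Instruction:\n{instruction}\n\n### Response:"
+)
+
+result = llm(prompt_template.format(instruction="What is self-attention?"), max_tokens=256)
+print(result["choices"][0]["text"])
+```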
+
+
+## Finetuning details
+The finetuning scripts will be available in our [RAIL GitHub Repository](https://github.com/vmware-labs/research-and-development-artificial-intelligence-lab/tree/main/instruction-tuning).
+
+
+## Evaluation
+
+**TODO**
diff --git a/ggml-model-Q4_0.gguf b/ggml-model-Q4_0.gguf
new file mode 100644
index 0000000..915cbd8
--- /dev/null
+++ b/ggml-model-Q4_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15709803fa2158f52687213188651e4d64a290101a4707339a0550dff4e64212
+size 3825818912
diff --git a/ggml-model-Q8_0.gguf b/ggml-model-Q8_0.gguf
new file mode 100644
index 0000000..7f31037
--- /dev/null
+++ b/ggml-model-Q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:243c305cdd04816039a5f20be4417b257c37fa4ef9f8ab6505b79305d463de22
+size 7161101600
diff --git a/ggml-model-f16.gguf b/ggml-model-f16.gguf
new file mode 100644
index 0000000..5402101
--- /dev/null
+++ b/ggml-model-f16.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c35cfd775e365a800da010e01b379b86840c727d370114e2c87fa98da8c09e08
+size 13478116608