From f514980e539cebca593adfcc9a46c759d1954e92 Mon Sep 17 00:00:00 2001
From: Eugenio Schiavoni
Date: Sat, 11 May 2024 05:47:51 +0000
Subject: [PATCH] Update README.md

---
 README.md | 55 ++++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 40 insertions(+), 15 deletions(-)

diff --git a/README.md b/README.md
index 1f04509..e99e303 100644
--- a/README.md
+++ b/README.md
@@ -10,10 +10,14 @@ base_model:
 - mlabonne/ChimeraLlama-3-8B-v2
 - nbeerbower/llama-3-stella-8B
 - uygarkurt/llama-3-merged-linear
+license: other
 ---
 
 # NeuralLLaMa-3-8b-DT-v0.1
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64d71ab4089bc502ceb44d29/tK72e9RGnYyBVRy0T_Kba.png)
+
 NeuralLLaMa-3-8b-DT-v0.1 is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
 * [mlabonne/ChimeraLlama-3-8B-v2](https://huggingface.co/mlabonne/ChimeraLlama-3-8B-v2)
 * [nbeerbower/llama-3-stella-8B](https://huggingface.co/nbeerbower/llama-3-stella-8B)
 * [uygarkurt/llama-3-merged-linear](https://huggingface.co/uygarkurt/llama-3-merged-linear)
@@ -43,28 +47,49 @@ parameters:
   int8_mask: true
 dtype: float16
 ```
+## 🗨️ Chats
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64d71ab4089bc502ceb44d29/feYEkbM_TqeahAMOoiGoG.png)
 
 ## 💻 Usage
 
 ```python
-!pip install -qU transformers accelerate
+!pip install -qU transformers accelerate bitsandbytes
 
-from transformers import AutoTokenizer
-import transformers
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer, BitsAndBytesConfig
 import torch
 
-model = "Kukedlc/NeuralLLaMa-3-8b-DT-v0.1"
-messages = [{"role": "user", "content": "What is a large language model?"}]
-
-tokenizer = AutoTokenizer.from_pretrained(model)
-prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-pipeline = transformers.pipeline(
-    "text-generation",
-    model=model,
-    torch_dtype=torch.float16,
-    device_map="auto",
+bnb_config = BitsAndBytesConfig(  # 4-bit NF4 quantization cuts the 8B model's memory footprint
+    load_in_4bit=True,
+    bnb_4bit_use_double_quant=True,
+    bnb_4bit_quant_type="nf4",
+    bnb_4bit_compute_dtype=torch.bfloat16
 )
-outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
-print(outputs[0]["generated_text"])
+
+MODEL_NAME = 'Kukedlc/NeuralLLaMa-3-8b-DT-v0.1'
+tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, device_map='cuda:0', quantization_config=bnb_config)
+
+prompt_system = "You are an advanced language model that speaks Spanish fluently, clearly, and precisely. \
+You are called Roberto the Robot and you are an aspiring post-modern artist."
+prompt = "Create a piece of art that represents how you see yourself, Roberto, as an advanced LLM: use ASCII art, mix in diagrams and engineering, and let yourself go."
+
+chat = [
+    {"role": "system", "content": prompt_system},
+    {"role": "user", "content": prompt},
+]
+
+prompt_text = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
+inputs = tokenizer(prompt_text, return_tensors="pt", add_special_tokens=False).to('cuda')  # the template already adds <|begin_of_text|>
+streamer = TextStreamer(tokenizer)  # prints tokens to stdout as they are generated
+stop_token = "<|eot_id|>"
+stop = tokenizer.convert_tokens_to_ids(stop_token)  # encode() would prepend <|begin_of_text|>, so [0] would give the wrong id
+
+_ = model.generate(**inputs, streamer=streamer, max_new_tokens=1024, do_sample=True, temperature=0.7, repetition_penalty=1.2, top_p=0.9, eos_token_id=stop)
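+
+# A minimal sketch, assuming the setup above: capture the completion as a string
+# instead of streaming it, slicing the prompt tokens off before decoding.
+output_ids = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.7, eos_token_id=stop)
+completion = tokenizer.decode(output_ids[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
+print(completion)
 ```
\ No newline at end of file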