diff --git a/.gitattributes b/.gitattributes index 53d7257..ccda318 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,47 +1,47 @@ *.7z filter=lfs diff=lfs merge=lfs -text *.arrow filter=lfs diff=lfs merge=lfs -text *.bin filter=lfs diff=lfs merge=lfs -text -*.bin.* filter=lfs diff=lfs merge=lfs -text *.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text *.ftz filter=lfs diff=lfs merge=lfs -text *.gz filter=lfs diff=lfs merge=lfs -text *.h5 filter=lfs diff=lfs merge=lfs -text *.joblib filter=lfs diff=lfs merge=lfs -text *.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text *.model filter=lfs diff=lfs merge=lfs -text *.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text *.onnx filter=lfs diff=lfs merge=lfs -text *.ot filter=lfs diff=lfs merge=lfs -text *.parquet filter=lfs diff=lfs merge=lfs -text *.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text *.pt filter=lfs diff=lfs merge=lfs -text *.pth filter=lfs diff=lfs merge=lfs -text *.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text *.tflite filter=lfs diff=lfs merge=lfs -text *.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text *.xz filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text -*.zstandard filter=lfs diff=lfs merge=lfs -text -*.tfevents* filter=lfs diff=lfs merge=lfs -text -*.db* filter=lfs diff=lfs merge=lfs -text -*.ark* filter=lfs diff=lfs merge=lfs -text -**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text -**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text -**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text -*.safetensors filter=lfs diff=lfs 
merge=lfs -text -*.ckpt filter=lfs diff=lfs merge=lfs -text -*.gguf* filter=lfs diff=lfs merge=lfs -text -*.ggml filter=lfs diff=lfs merge=lfs -text -*.llamafile* filter=lfs diff=lfs merge=lfs -text -*.pt2 filter=lfs diff=lfs merge=lfs -text -*.mlmodel filter=lfs diff=lfs merge=lfs -text -*.npy filter=lfs diff=lfs merge=lfs -text -*.npz filter=lfs diff=lfs merge=lfs -text -*.pickle filter=lfs diff=lfs merge=lfs -text -*.pkl filter=lfs diff=lfs merge=lfs -text -*.tar filter=lfs diff=lfs merge=lfs -text -*.wasm filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text -*tfevents* filter=lfs diff=lfs merge=lfs -text \ No newline at end of file +*tfevents* filter=lfs diff=lfs merge=lfs -text +sam.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text +sam.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text +sam.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text +sam.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text +sam.Q4_0.gguf filter=lfs diff=lfs merge=lfs -text +sam.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text +sam.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text +sam.Q5_0.gguf filter=lfs diff=lfs merge=lfs -text +sam.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text +sam.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text +sam.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text +sam.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md index 68d183b..f9b1a7b 100644 --- a/README.md +++ b/README.md @@ -1,47 +1,390 @@ --- -license: Apache License 2.0 +base_model: SuperAGI/SAM +inference: false +language: +- en +license: apache-2.0 +model_creator: SuperAGI +model_name: SAM +model_type: mistral +prompt_template: '[INST] {prompt} [/INST] -#model-type: -##如 gpt、phi、llama、chatglm、baichuan 等 -#- gpt - -#domain: -##如 nlp、cv、audio、multi-modal -#- nlp - -#language: -##语言代码列表 https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa -#- cn - -#metrics: -##如 CIDEr、Blue、ROUGE 等 -#- CIDEr - -#tags: -##各种自定义,包括 
pretrained、fine-tuned、instruction-tuned、RL-tuned 等训练方法和其他 -#- pretrained - -#tools: -##如 vllm、fastchat、llamacpp、AdaSeq 等 -#- vllm + ' +quantized_by: TheBloke --- -### 当前模型的贡献者未提供更加详细的模型介绍。模型文件和权重,可浏览“模型文件”页面获取。 -#### 您可以通过如下git clone命令,或者ModelScope SDK来下载模型 + + + + +
+TheBloke's LLM work is generously supported by a grant from andreessen horowitz (a16z)
如果您是本模型的贡献者,我们邀请您根据模型贡献文档,及时完善模型卡片内容。
\ No newline at end of file +## How to use with LangChain + +Here are guides on using llama-cpp-python and ctransformers with LangChain: + +* [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp) +* [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers) + + + + + +## Discord + +For further support, and discussions on these models and AI in general, join us at: + +[TheBloke AI's Discord server](https://discord.gg/theblokeai) + +## Thanks, and how to contribute + +Thanks to the [chirper.ai](https://chirper.ai) team! + +Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)! + +I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. + +If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. + +Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. + +* Patreon: https://patreon.com/TheBlokeAI +* Ko-Fi: https://ko-fi.com/TheBlokeAI + +**Special thanks to**: Aemon Algiz. 
+ +**Patreon special mentions**: Michael Levine, 阿明, Trailburnt, Nikolai Manek, John Detwiler, Randy H, Will Dee, Sebastain Graf, NimbleBox.ai, Eugene Pentland, Emad Mostaque, Ai Maven, Jim Angel, Jeff Scroggin, Michael Davis, Manuel Alberto Morcote, Stephen Murray, Robert, Justin Joy, Luke @flexchar, Brandon Frisco, Elijah Stavena, S_X, Dan Guido, Undi ., Komninos Chatzipapas, Shadi, theTransient, Lone Striker, Raven Klaugh, jjj, Cap'n Zoog, Michel-Marie MAUDET (LINAGORA), Matthew Berman, David, Fen Risland, Omer Bin Jawed, Luke Pendergrass, Kalila, OG, Erik Bjäreholt, Rooh Singh, Joseph William Delisle, Dan Lewis, TL, John Villwock, AzureBlack, Brad, Pedro Madruga, Caitlyn Gatomon, K, jinyuan sun, Mano Prime, Alex, Jeffrey Morgan, Alicia Loh, Illia Dulskyi, Chadd, transmissions 11, fincy, Rainer Wilmers, ReadyPlayerEmma, knownsqashed, Mandus, biorpg, Deo Leter, Brandon Phillips, SuperWojo, Sean Connelly, Iucharbius, Jack West, Harry Royden McLaughlin, Nicholas, terasurfer, Vitor Caleffi, Duane Dunston, Johann-Peter Hartmann, David Ziegler, Olakabola, Ken Nordquist, Trenton Dambrowitz, Tom X Nguyen, Vadim, Ajan Kanaga, Leonard Tan, Clay Pascal, Alexandros Triantafyllidis, JM33133, Xule, vamX, ya boyyy, subjectnull, Talal Aujan, Alps Aficionado, wassieverse, Ari Malik, James Bentley, Woland, Spencer Kim, Michael Dempsey, Fred von Graf, Elle, zynix, William Richards, Stanislav Ovsiannikov, Edmond Seymore, Jonathan Leane, Martin Kemka, usrbinkat, Enrico Ros + + +Thank you to all my generous patrons and donaters! + +And thank you again to a16z for their generous grant. + + + + +# Original model card: SuperAGI's SAM + +# Model Card +SAM (Small Agentic Model), a 7B model that demonstrates impressive reasoning abilities despite its smaller size. SAM-7B has outperformed existing SoTA models on various reasoning benchmarks, including GSM8k and ARC-C. 
+ +For full details of this model please read our [release blog post](https://superagi.com/introducing-sam-small-agentic-model/). + +# Key Contributions +- SAM-7B outperforms GPT 3.5, Orca, and several other 70B models on multiple reasoning benchmarks, including ARC-C and GSM8k. +- Interestingly, despite being trained on a 97% smaller dataset, SAM-7B surpasses Orca-13B on GSM8k. +- All responses in our fine-tuning dataset are generated by open-source models without any assistance from state-of-the-art models like GPT-3.5 or GPT-4. + +## Training + - Trained by: SuperAGI Team + - Hardware: NVIDIA 6 x H100 SxM (80GB) + - Model used: Mistral 7B + - Duration of finetuning: 4 hours + - Number of epochs: 1 + - Batch size: 16 + - Learning Rate: 2e-5 + - Warmup Ratio: 0.1 + - Optimizer: AdamW + - Scheduler: Cosine + +## Example Prompt + +The template used to build a prompt for the Instruct model is defined as follows: +``` 
+
+Note: Temperature=0.3 is the suggested value for optimal performance
+
+## Run the model
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model_id = "SuperAGI/SAM"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+
+model = AutoModelForCausalLM.from_pretrained(model_id)
+
+text = "Can elephants fly?"
+inputs = tokenizer(text, return_tensors="pt")
+
+outputs = model.generate(**inputs, max_new_tokens=200)
+print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+```
+
+
+## Limitations
+
+SAM is a demonstration that better reasoning can be induced using a smaller amount of high-quality data generated using open-source LLMs.
+The model is not suitable for conversations and simple Q&A; it performs better at task breakdown and reasoning.
+It does not have any moderation mechanisms. Therefore, the model is not suitable for production usage as it doesn't have guardrails for toxicity, societal bias, and language limitations. We would love to collaborate with the community to build safer and better models.
+
+## The SuperAGI AI Team
+Anmol Gautam, Arkajit Datta, Rajat Chawla, Ayush Vatsal, Sukrit Chatterjee, Adarsh Jha, Abhijeet Sinha, Rakesh Krishna, Adarsh Deep, Ishaan Bhola, Mukunda NS, Nishant Gaurav.
+
+
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..9f0f76f
--- /dev/null
+++ b/config.json
@@ -0,0 +1,3 @@
+{
+ "model_type": "mistral"
+}
\ No newline at end of file
diff --git a/configuration.json b/configuration.json
new file mode 100644
index 0000000..159097f
--- /dev/null
+++ b/configuration.json
@@ -0,0 +1 @@
+{"framework": "pytorch", "task": "others", "allow_remote": true}
\ No newline at end of file
diff --git a/sam.Q2_K.gguf b/sam.Q2_K.gguf
new file mode 100644
index 0000000..e438a8d
--- /dev/null
+++ b/sam.Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b74175534d527200b7aed04b48aff4a169c1df51c8ebf565b96c28c1cdd0560c
+size 3084034176
diff --git a/sam.Q3_K_L.gguf b/sam.Q3_K_L.gguf
new file mode 100644
index 0000000..4af5901
--- /dev/null
+++ b/sam.Q3_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c1e373bb77c267a7fb591d49d46c1545f0ea084e1627ab658ebd2687e3a1160
+size 3822960768
diff --git a/sam.Q3_K_M.gguf b/sam.Q3_K_M.gguf
new file mode 100644
index 0000000..eccdd7f
--- /dev/null
+++ b/sam.Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c5fb9eedd6a0a1650525bab75d279a4bb44409bcd2cb0bb4b2e920f93a20b9c
+size 3519922304
diff --git a/sam.Q3_K_S.gguf b/sam.Q3_K_S.gguf
new file mode 100644
index 0000000..a0e48d8
--- /dev/null
+++ b/sam.Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1a8e3b76ab2553a6ffc5f0ea5fab9a388f11d6b1fbd5f88f8763732172345bd
+size 3165503616
diff --git a/sam.Q4_0.gguf b/sam.Q4_0.gguf
new file mode 100644
index 0000000..2dbbfa6
--- /dev/null
+++ b/sam.Q4_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20f5ae4d97094915c83f590be260499462801fb065e28187650c41e22512c773
+size 4109852800
diff --git a/sam.Q4_K_M.gguf b/sam.Q4_K_M.gguf
new file mode 100644
index 0000000..3765369
--- /dev/null
+++ b/sam.Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31e3064383bdf6b6a84bae0a391b7020467f81a855c5d4b5eb23364f080ca20c
+size 4369375360
diff --git a/sam.Q4_K_S.gguf b/sam.Q4_K_S.gguf
new file mode 100644
index 0000000..59f6718
--- /dev/null
+++ b/sam.Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1d53090da3dcb2ff529da0d545b0244e2ae764ba5e7c1d4805033ab4acb9a6f
+size 4141310080
diff --git a/sam.Q5_0.gguf b/sam.Q5_0.gguf
new file mode 100644
index 0000000..f55b95f
--- /dev/null
+++ b/sam.Q5_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83665d0c22611aeda7f8bfa8b7437a75ba6ff043d794c5e0ab39769471fb75e3
+size 4998652032
diff --git a/sam.Q5_K_M.gguf b/sam.Q5_K_M.gguf
new file mode 100644
index 0000000..de9f444
--- /dev/null
+++ b/sam.Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af54a24367fbb44c2359a3c49f462e95c4745b8d933ceaf6eef1c8455c2ce9b4
+size 5132345472
diff --git a/sam.Q5_K_S.gguf b/sam.Q5_K_S.gguf
new file mode 100644
index 0000000..df84110
--- /dev/null
+++ b/sam.Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb6c50956f46cd63f2f158f20445e8f6872e277c7bd0cf9f30992f21637b265b
+size 4998652032
diff --git a/sam.Q6_K.gguf b/sam.Q6_K.gguf
new file mode 100644
index 0000000..a2ff2e2
--- /dev/null
+++ b/sam.Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9730ed095fe0026c4208e4c0e7644b01103f6378804addff4e67000a82aa95f3
+size 5943001216
diff --git a/sam.Q8_0.gguf b/sam.Q8_0.gguf
new file mode 100644
index 0000000..cd0251c
--- /dev/null
+++ b/sam.Q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fcf5a98744f2a7d7c9bd6b9df80ae751932a66a8d8e135d1535bbe279039b6f8
+size 7696793728