Update metadata with huggingface_hub

ai-modelscope
2024-11-30 22:44:57 +08:00
parent fc178a6a9f
commit fc4e3a8629
28 changed files with 230 additions and 63 deletions

.gitattributes

@@ -1,47 +1,60 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*.tfevents* filter=lfs diff=lfs merge=lfs -text
*.db* filter=lfs diff=lfs merge=lfs -text
*.ark* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.gguf* filter=lfs diff=lfs merge=lfs -text
*.ggml filter=lfs diff=lfs merge=lfs -text
*.llamafile* filter=lfs diff=lfs merge=lfs -text
*.pt2 filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q6_K_L.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q5_K_L.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q4_K_L.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q4_0_8_8.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q4_0_4_8.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q4_0_4_4.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q3_K_XL.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-IQ3_M.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-IQ3_XS.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q2_K_L.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-IQ2_M.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2-f16.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v2.imatrix filter=lfs diff=lfs merge=lfs -text

README.md

@@ -1,47 +1,125 @@
---
license: Apache License 2.0
#model-type:
## e.g. gpt, phi, llama, chatglm, baichuan, etc.
#- gpt
#domain:
## e.g. nlp, cv, audio, multi-modal
#- nlp
#language:
## list of language codes: https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
#- cn
#metrics:
## e.g. CIDEr, BLEU, ROUGE, etc.
#- CIDEr
#tags:
## free-form tags, including training methods such as pretrained, fine-tuned, instruction-tuned, RL-tuned, etc.
#- pretrained
#tools:
## e.g. vllm, fastchat, llamacpp, AdaSeq, etc.
#- vllm
base_model: TheDrummer/Tiger-Gemma-9B-v2
pipeline_tag: text-generation
quantized_by: bartowski
---
### The contributors of this model have not provided a more detailed model introduction. Model files and weights can be browsed on the "Model Files" page.
#### You can download the model with the git clone command below, or with the ModelScope SDK
SDK download
```bash
# Install ModelScope
pip install modelscope
```
```python
# Download the model with the SDK
from modelscope import snapshot_download
model_dir = snapshot_download('bartowski/Tiger-Gemma-9B-v2-GGUF')
```
Git download
```
# Download the model with git
git clone https://www.modelscope.cn/bartowski/Tiger-Gemma-9B-v2-GGUF.git
```
<p style="color: lightgrey;">If you are a contributor to this model, we invite you to complete the model card following the <a href="https://modelscope.cn/docs/ModelScope%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%85%A5%E6%B5%81%E7%A8%8B%E6%A6%82%E8%A7%88" style="color: lightgrey; text-decoration: underline;">model contribution documentation</a>.</p>

## Llamacpp imatrix Quantizations of Tiger-Gemma-9B-v2
Using <a href="https://github.com/ggerganov/llama.cpp/">llama.cpp</a> release <a href="https://github.com/ggerganov/llama.cpp/releases/tag/b3658">b3658</a> for quantization.
Original model: https://huggingface.co/TheDrummer/Tiger-Gemma-9B-v2
All quants made using imatrix option with dataset from [here](https://gist.github.com/bartowski1182/eb213dccb3571f863da82e99418f81e8)
Run them in [LM Studio](https://lmstudio.ai/)
## Prompt format
```
<bos><start_of_turn>user
{prompt}<end_of_turn>
<start_of_turn>model
<end_of_turn>
<start_of_turn>model
```
Note that this model does not support a System prompt.
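To try the template end to end, here is a minimal sketch using llama.cpp's `llama-cli` (assumptions: the llama.cpp binaries are built and on your PATH, the Q4_K_M file is in the current directory, and `llama-cli` prepends `<bos>` itself during tokenization):
```bash
# Single-turn generation with the Gemma prompt template (sketch).
# llama-cli adds <bos> when tokenizing, so it is omitted from -p here.
llama-cli -m Tiger-Gemma-9B-v2-Q4_K_M.gguf \
  -p $'<start_of_turn>user\nWrite a haiku about tigers.<end_of_turn>\n<start_of_turn>model\n' \
  -n 256
```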
## Download a file (not the whole branch) from below:
| Filename | Quant type | File Size | Split | Description |
| -------- | ---------- | --------- | ----- | ----------- |
| [Tiger-Gemma-9B-v2-f16.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-f16.gguf) | f16 | 18.49GB | false | Full F16 weights. |
| [Tiger-Gemma-9B-v2-Q8_0.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q8_0.gguf) | Q8_0 | 9.83GB | false | Extremely high quality, generally unneeded but max available quant. |
| [Tiger-Gemma-9B-v2-Q6_K_L.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q6_K_L.gguf) | Q6_K_L | 7.81GB | false | Uses Q8_0 for embed and output weights. Very high quality, near perfect, *recommended*. |
| [Tiger-Gemma-9B-v2-Q6_K.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q6_K.gguf) | Q6_K | 7.59GB | false | Very high quality, near perfect, *recommended*. |
| [Tiger-Gemma-9B-v2-Q5_K_L.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q5_K_L.gguf) | Q5_K_L | 6.87GB | false | Uses Q8_0 for embed and output weights. High quality, *recommended*. |
| [Tiger-Gemma-9B-v2-Q5_K_M.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q5_K_M.gguf) | Q5_K_M | 6.65GB | false | High quality, *recommended*. |
| [Tiger-Gemma-9B-v2-Q5_K_S.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q5_K_S.gguf) | Q5_K_S | 6.48GB | false | High quality, *recommended*. |
| [Tiger-Gemma-9B-v2-Q4_K_L.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q4_K_L.gguf) | Q4_K_L | 5.98GB | false | Uses Q8_0 for embed and output weights. Good quality, *recommended*. |
| [Tiger-Gemma-9B-v2-Q4_K_M.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q4_K_M.gguf) | Q4_K_M | 5.76GB | false | Good quality, default size for most use cases, *recommended*. |
| [Tiger-Gemma-9B-v2-Q4_K_S.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q4_K_S.gguf) | Q4_K_S | 5.48GB | false | Slightly lower quality with more space savings, *recommended*. |
| [Tiger-Gemma-9B-v2-Q4_0.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q4_0.gguf) | Q4_0 | 5.46GB | false | Legacy format, generally not worth using over similarly sized formats. |
| [Tiger-Gemma-9B-v2-Q4_0_8_8.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q4_0_8_8.gguf) | Q4_0_8_8 | 5.44GB | false | Optimized for ARM inference. Requires 'sve' support (see link below). |
| [Tiger-Gemma-9B-v2-Q4_0_4_8.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q4_0_4_8.gguf) | Q4_0_4_8 | 5.44GB | false | Optimized for ARM inference. Requires 'i8mm' support (see link below). |
| [Tiger-Gemma-9B-v2-Q4_0_4_4.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q4_0_4_4.gguf) | Q4_0_4_4 | 5.44GB | false | Optimized for ARM inference. Should work well on all ARM chips, pick this if you're unsure. |
| [Tiger-Gemma-9B-v2-Q3_K_XL.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q3_K_XL.gguf) | Q3_K_XL | 5.35GB | false | Uses Q8_0 for embed and output weights. Lower quality but usable, good for low RAM availability. |
| [Tiger-Gemma-9B-v2-IQ4_XS.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-IQ4_XS.gguf) | IQ4_XS | 5.18GB | false | Decent quality, smaller than Q4_K_S with similar performance, *recommended*. |
| [Tiger-Gemma-9B-v2-Q3_K_L.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q3_K_L.gguf) | Q3_K_L | 5.13GB | false | Lower quality but usable, good for low RAM availability. |
| [Tiger-Gemma-9B-v2-Q3_K_M.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q3_K_M.gguf) | Q3_K_M | 4.76GB | false | Low quality. |
| [Tiger-Gemma-9B-v2-IQ3_M.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-IQ3_M.gguf) | IQ3_M | 4.49GB | false | Medium-low quality, new method with decent performance comparable to Q3_K_M. |
| [Tiger-Gemma-9B-v2-Q3_K_S.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q3_K_S.gguf) | Q3_K_S | 4.34GB | false | Low quality, not recommended. |
| [Tiger-Gemma-9B-v2-IQ3_XS.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-IQ3_XS.gguf) | IQ3_XS | 4.14GB | false | Lower quality, new method with decent performance, slightly better than Q3_K_S. |
| [Tiger-Gemma-9B-v2-Q2_K_L.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q2_K_L.gguf) | Q2_K_L | 4.03GB | false | Uses Q8_0 for embed and output weights. Very low quality but surprisingly usable. |
| [Tiger-Gemma-9B-v2-Q2_K.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-Q2_K.gguf) | Q2_K | 3.81GB | false | Very low quality but surprisingly usable. |
| [Tiger-Gemma-9B-v2-IQ2_M.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v2-GGUF/blob/main/Tiger-Gemma-9B-v2-IQ2_M.gguf) | IQ2_M | 3.43GB | false | Relatively low quality, uses SOTA techniques to be surprisingly usable. |
## Q4_0_X_X
If you're using an ARM chip, the Q4_0_X_X quants will have a substantial speedup. Check out Q4_0_4_4 speed comparisons [on the original pull request](https://github.com/ggerganov/llama.cpp/pull/5780#pullrequestreview-21657544660)
To check which one would work best for your ARM chip, you can check [AArch64 SoC features](https://gpages.juszkiewicz.com.pl/arm-socs-table/arm-socs.html) (thanks EloyOn!).
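On Linux, one quick way to check is to read the CPU flags directly; the sketch below assumes an AArch64 machine where `/proc/cpuinfo` exposes a `Features` line:
```bash
# List the relevant AArch64 features your CPU advertises (Linux sketch).
# 'sve' -> Q4_0_8_8, 'i8mm' -> Q4_0_4_8, plain dotprod/NEON -> Q4_0_4_4.
grep -m1 '^Features' /proc/cpuinfo | tr ' ' '\n' | grep -Ex 'sve|i8mm|asimddp'
```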
## Embed/output weights
Some of these quants (Q3_K_XL, Q4_K_L etc) are the standard quantization method with the embeddings and output weights quantized to Q8_0 instead of what they would normally default to.
Some say that this improves the quality, while others don't notice any difference. If you use these models, PLEASE COMMENT with your findings. I would like feedback that these are actually used and useful, so I don't keep uploading quants no one is using.
Thanks!
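For context, here is a sketch of how such a variant can be produced with llama.cpp's `llama-quantize`; the flag names below match recent llama.cpp builds but should be treated as an assumption and checked against `llama-quantize --help` on your build:
```bash
# Quantize to Q4_K_M while holding token embeddings and the output tensor
# at Q8_0 -- the recipe behind the _L suffix quants. Sketch; verify flags.
llama-quantize --token-embedding-type q8_0 --output-tensor-type q8_0 \
  Tiger-Gemma-9B-v2-f16.gguf Tiger-Gemma-9B-v2-Q4_K_L.gguf Q4_K_M
```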
## Credits
Thank you kalomaze and Dampf for assistance in creating the imatrix calibration dataset
Thank you ZeroWw for the inspiration to experiment with embed/output
## Downloading using huggingface-cli
First, make sure you have huggingface-cli installed:
```
pip install -U "huggingface_hub[cli]"
```
Then, you can target the specific file you want:
```
huggingface-cli download bartowski/Tiger-Gemma-9B-v2-GGUF --include "Tiger-Gemma-9B-v2-Q4_K_M.gguf" --local-dir ./
```
If the model is bigger than 50GB, it will have been split into multiple files. In order to download them all to a local folder, run:
```
huggingface-cli download bartowski/Tiger-Gemma-9B-v2-GGUF --include "Tiger-Gemma-9B-v2-Q8_0/*" --local-dir ./
```
You can either specify a new local-dir (Tiger-Gemma-9B-v2-Q8_0) or download them all in place (./)
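If downloads are slow, `huggingface_hub` also ships an optional Rust-based transfer backend. The `hf_transfer` package and the `HF_HUB_ENABLE_HF_TRANSFER` switch are standard `huggingface_hub` features, but treat the speedup as workload-dependent; a sketch:
```bash
# Optional: enable the hf_transfer backend for faster parallel downloads.
pip install -U hf_transfer
HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download \
  bartowski/Tiger-Gemma-9B-v2-GGUF \
  --include "Tiger-Gemma-9B-v2-Q4_K_M.gguf" --local-dir ./
```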
## Which file should I choose?
A great write up with charts showing various performances is provided by Artefact2 [here](https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9)
The first thing to figure out is how big a model you can run. To do this, you'll need to figure out how much RAM and/or VRAM you have.
If you want your model running as FAST as possible, you'll want to fit the whole thing on your GPU's VRAM. Aim for a quant with a file size 1-2GB smaller than your GPU's total VRAM.
If you want the absolute maximum quality, add both your system RAM and your GPU's VRAM together, then similarly grab a quant with a file size 1-2GB smaller than that total.
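As a concrete sketch of that rule of thumb (assumes an NVIDIA GPU with `nvidia-smi` available; adjust for your own setup):
```bash
# Rule of thumb: total VRAM minus ~2GB headroom = max quant file size.
vram_mib=$(nvidia-smi --query-gpu=memory.total --format=csv,noheader,nounits | head -n1)
echo "Pick the largest quant under $(( vram_mib / 1024 - 2 ))GB from the table above."
```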
Next, you'll need to decide if you want to use an 'I-quant' or a 'K-quant'.
If you don't want to think too much, grab one of the K-quants. These are in format 'QX_K_X', like Q5_K_M.
If you want to get more into the weeds, you can check out this extremely useful feature chart:
[llama.cpp feature matrix](https://github.com/ggerganov/llama.cpp/wiki/Feature-matrix)
But basically, if you're aiming for below Q4, and you're running cuBLAS (Nvidia) or rocBLAS (AMD), you should look towards the I-quants. These are in format IQX_X, like IQ3_M. These are newer and offer better performance for their size.
These I-quants can also be used on CPU and Apple Metal, but will be slower than their K-quant equivalent, so speed vs performance is a tradeoff you'll have to decide.
The I-quants are *not* compatible with Vulkan, which also supports AMD, so if you have an AMD card double check whether you're using the rocBLAS build or the Vulkan build. At the time of writing this, LM Studio has a preview with ROCm support, and other inference engines have specific builds for ROCm.
Want to support my work? Visit my ko-fi page here: https://ko-fi.com/bartowski

Tiger-Gemma-9B-v2-IQ2_M.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cfabca4a3193699e8f8d557b90e16aed09cd570d6401791c6362fb8478ba921b
size 3434669248

Tiger-Gemma-9B-v2-IQ3_M.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ebca41f7882364f884faef91f2fbca818ad5ac9d81dd86215e942e76c1d18cd2
size 4494615744

Tiger-Gemma-9B-v2-IQ3_XS.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b35092adfe079d270593efa6c8e1c79958769d678232c0581c0d57afb051ae28
size 4144989376

Tiger-Gemma-9B-v2-IQ4_XS.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4363885bfde29e4003b61eb8641605cd195136da6372989052f3050883121c37
size 5183030464

Tiger-Gemma-9B-v2-Q2_K.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fee6b308d3bc872393e6d87f8e28aa76a36497dc0d98be5af649a2b26ae6eb49
size 3805398208

Tiger-Gemma-9B-v2-Q2_K_L.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8a810aa036f6068ced1b2a363f51f04a2dbae5fb7dd690836c845f27c2d70d87
size 4027606208

Tiger-Gemma-9B-v2-Q3_K_L.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:111dd4dbc2dd05f51eb7875e86d58bda3278fc0b9ed54d2cae482690d95351cf
size 5132453056

Tiger-Gemma-9B-v2-Q3_K_M.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f9a8a1fe7929aeedd0af0e2e6b66258d5329dea6ac8d714960db7b0a7aa72583
size 4761781440

Tiger-Gemma-9B-v2-Q3_K_S.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4b6b9dd9ad5324bd301491e0e8f27132d7c766a96c8e39d3fcad58a24df5ba0b
size 4337665216

Tiger-Gemma-9B-v2-Q3_K_XL.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:790fbabf9d0d9bc3a4038205046b1802fde707abeb6052ef95d136db8e4e7270
size 5354661056

Tiger-Gemma-9B-v2-Q4_0.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0d347949ec9e03d416ae2b63e6504a197187ebb13ccc956c8dd030a5359ed6f7
size 5459199168

Tiger-Gemma-9B-v2-Q4_0_4_4.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e336b8740402c294497cac54a0c26549a04b985b26b52498bbb6d13401585a9b
size 5443142848

Tiger-Gemma-9B-v2-Q4_0_4_8.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5931fafa33ad3d15ce51977ec0418d757bb4c0c68e75984eaa78b25760da8471
size 5443142848

Tiger-Gemma-9B-v2-Q4_0_8_8.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5f926a65742ebe8698d9436c517c3f522d176cffafec4b3fe0917869fd8f5ead
size 5443142848

Tiger-Gemma-9B-v2-Q4_K_L.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7dd86c44733a8f7b1a598e150afa210ad8628103d06ee80161cbe6009a8c64ac
size 5983265984

Tiger-Gemma-9B-v2-Q4_K_M.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e7fd84083b8049c9f3fe3abc47c35d2ee1a47caa2cf20240221d2d3d6d436858
size 5761057984

Tiger-Gemma-9B-v2-Q4_K_S.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6e9e0462617315c7fb9804ab60558006b7d63580b122c52ecfb216114a57df2f
size 5478925504

Tiger-Gemma-9B-v2-Q5_K_L.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:256f586d074bab568fb51079fd8c344a72bf8a00fe6c29b2bf1b44cd66a4a705
size 6869574848

Tiger-Gemma-9B-v2-Q5_K_M.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e6aed426fa4a85ff69d343e9b59218331fb4d9207293e47ede99055f3b2f993f
size 6647366848

Tiger-Gemma-9B-v2-Q5_K_S.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5593ac0a9decf6c0ee6d1e3cf721e651c540f83857f4c236fe552b1c7e1b6b50
size 6483592384

Tiger-Gemma-9B-v2-Q6_K.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:00373c4fe355ab845da253c18bd154a43edc15ba1e9d9cc6ef56252ab1409ddd
size 7589070016

Tiger-Gemma-9B-v2-Q6_K_L.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:68cac8860ce3ddeccc6da9d8e88ebf1d97672680ee6971df354824f981750156
size 7811278016

Tiger-Gemma-9B-v2-Q8_0.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dda95917ddd4c809474954e9ada48c8b04720b613f4877f799da1b822d433cb1
size 9827148992

Tiger-Gemma-9B-v2-f16.gguf

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1334f39a3b2d657a48ed2687674f902dd463fd5b1b098499d54ac495720ce32c
size 18490680224

Tiger-Gemma-9B-v2.imatrix

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e14f6e066f1ef722dbb35f2a359756c1daa73985070f168dafa891b25f908245
size 6116900

configuration.json

@@ -0,0 +1 @@
{"framework": "pytorch", "task": "text-generation", "allow_remote": true}