Update metadata with huggingface_hub

ai-modelscope
2024-11-19 19:26:41 +08:00
parent 04fd3f0269
commit 01ac4b5dcb
28 changed files with 224 additions and 55 deletions

.gitattributes vendored

@@ -1,38 +1,60 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*.tfevents* filter=lfs diff=lfs merge=lfs -text
*.db* filter=lfs diff=lfs merge=lfs -text
*.ark* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.gguf* filter=lfs diff=lfs merge=lfs -text
*.ggml filter=lfs diff=lfs merge=lfs -text
*.llamafile* filter=lfs diff=lfs merge=lfs -text
*.pt2 filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q6_K_L.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q5_K_L.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q4_K_L.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q4_0_8_8.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q4_0_4_8.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q4_0_4_4.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q3_K_XL.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-IQ3_M.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-IQ3_XS.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q2_K_L.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-IQ2_M.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3-f16.gguf filter=lfs diff=lfs merge=lfs -text
Tiger-Gemma-9B-v3.imatrix filter=lfs diff=lfs merge=lfs -text

README.md

@@ -1,47 +1,118 @@
---
license: Apache License 2.0
#model-type:
##e.g. gpt, phi, llama, chatglm, baichuan, etc.
#- gpt
#domain:
##e.g. nlp, cv, audio, multi-modal
#- nlp
#language:
##list of language codes: https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
#- cn
#metrics:
##e.g. CIDEr, BLEU, ROUGE, etc.
#- CIDEr
#tags:
##custom tags, including training methods such as pretrained, fine-tuned, instruction-tuned, RL-tuned, and others
#- pretrained
#tools:
##e.g. vllm, fastchat, llamacpp, AdaSeq, etc.
#- vllm
base_model: TheDrummer/Tiger-Gemma-9B-v3
pipeline_tag: text-generation
quantized_by: bartowski
---
## Llamacpp imatrix Quantizations of Tiger-Gemma-9B-v3
Using <a href="https://github.com/ggerganov/llama.cpp/">llama.cpp</a> release <a href="https://github.com/ggerganov/llama.cpp/releases/tag/b3878">b3878</a> for quantization.
Original model: https://huggingface.co/TheDrummer/Tiger-Gemma-9B-v3
All quants made using imatrix option with dataset from [here](https://gist.github.com/bartowski1182/eb213dccb3571f863da82e99418f81e8)
Run them in [LM Studio](https://lmstudio.ai/)
## Prompt format
No prompt format found, check original model page
## Download a file (not the whole branch) from below:
| Filename | Quant type | File Size | Split | Description |
| -------- | ---------- | --------- | ----- | ----------- |
| [Tiger-Gemma-9B-v3-f16.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-f16.gguf) | f16 | 18.49GB | false | Full F16 weights. |
| [Tiger-Gemma-9B-v3-Q8_0.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q8_0.gguf) | Q8_0 | 9.83GB | false | Extremely high quality, generally unneeded but max available quant. |
| [Tiger-Gemma-9B-v3-Q6_K_L.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q6_K_L.gguf) | Q6_K_L | 7.81GB | false | Uses Q8_0 for embed and output weights. Very high quality, near perfect, *recommended*. |
| [Tiger-Gemma-9B-v3-Q6_K.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q6_K.gguf) | Q6_K | 7.59GB | false | Very high quality, near perfect, *recommended*. |
| [Tiger-Gemma-9B-v3-Q5_K_L.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q5_K_L.gguf) | Q5_K_L | 6.87GB | false | Uses Q8_0 for embed and output weights. High quality, *recommended*. |
| [Tiger-Gemma-9B-v3-Q5_K_M.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q5_K_M.gguf) | Q5_K_M | 6.65GB | false | High quality, *recommended*. |
| [Tiger-Gemma-9B-v3-Q5_K_S.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q5_K_S.gguf) | Q5_K_S | 6.48GB | false | High quality, *recommended*. |
| [Tiger-Gemma-9B-v3-Q4_K_L.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q4_K_L.gguf) | Q4_K_L | 5.98GB | false | Uses Q8_0 for embed and output weights. Good quality, *recommended*. |
| [Tiger-Gemma-9B-v3-Q4_K_M.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q4_K_M.gguf) | Q4_K_M | 5.76GB | false | Good quality, default size for most use cases, *recommended*. |
| [Tiger-Gemma-9B-v3-Q4_K_S.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q4_K_S.gguf) | Q4_K_S | 5.48GB | false | Slightly lower quality with more space savings, *recommended*. |
| [Tiger-Gemma-9B-v3-Q4_0.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q4_0.gguf) | Q4_0 | 5.46GB | false | Legacy format, generally not worth using over similarly sized formats. |
| [Tiger-Gemma-9B-v3-Q4_0_8_8.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q4_0_8_8.gguf) | Q4_0_8_8 | 5.44GB | false | Optimized for ARM inference. Requires 'sve' support (see link below). |
| [Tiger-Gemma-9B-v3-Q4_0_4_8.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q4_0_4_8.gguf) | Q4_0_4_8 | 5.44GB | false | Optimized for ARM inference. Requires 'i8mm' support (see link below). |
| [Tiger-Gemma-9B-v3-Q4_0_4_4.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q4_0_4_4.gguf) | Q4_0_4_4 | 5.44GB | false | Optimized for ARM inference. Should work well on all ARM chips, pick this if you're unsure. |
| [Tiger-Gemma-9B-v3-Q3_K_XL.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q3_K_XL.gguf) | Q3_K_XL | 5.35GB | false | Uses Q8_0 for embed and output weights. Lower quality but usable, good for low RAM availability. |
| [Tiger-Gemma-9B-v3-IQ4_XS.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-IQ4_XS.gguf) | IQ4_XS | 5.18GB | false | Decent quality, smaller than Q4_K_S with similar performance, *recommended*. |
| [Tiger-Gemma-9B-v3-Q3_K_L.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q3_K_L.gguf) | Q3_K_L | 5.13GB | false | Lower quality but usable, good for low RAM availability. |
| [Tiger-Gemma-9B-v3-Q3_K_M.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q3_K_M.gguf) | Q3_K_M | 4.76GB | false | Low quality. |
| [Tiger-Gemma-9B-v3-IQ3_M.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-IQ3_M.gguf) | IQ3_M | 4.49GB | false | Medium-low quality, new method with decent performance comparable to Q3_K_M. |
| [Tiger-Gemma-9B-v3-Q3_K_S.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q3_K_S.gguf) | Q3_K_S | 4.34GB | false | Low quality, not recommended. |
| [Tiger-Gemma-9B-v3-IQ3_XS.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-IQ3_XS.gguf) | IQ3_XS | 4.14GB | false | Lower quality, new method with decent performance, slightly better than Q3_K_S. |
| [Tiger-Gemma-9B-v3-Q2_K_L.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q2_K_L.gguf) | Q2_K_L | 4.03GB | false | Uses Q8_0 for embed and output weights. Very low quality but surprisingly usable. |
| [Tiger-Gemma-9B-v3-Q2_K.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-Q2_K.gguf) | Q2_K | 3.81GB | false | Very low quality but surprisingly usable. |
| [Tiger-Gemma-9B-v3-IQ2_M.gguf](https://huggingface.co/bartowski/Tiger-Gemma-9B-v3-GGUF/blob/main/Tiger-Gemma-9B-v3-IQ2_M.gguf) | IQ2_M | 3.43GB | false | Relatively low quality, uses SOTA techniques to be surprisingly usable. |
## Embed/output weights
Some of these quants (Q3_K_XL, Q4_K_L etc) are the standard quantization method with the embeddings and output weights quantized to Q8_0 instead of what they would normally default to.
Some say that this improves the quality, others don't notice any difference. If you use these models PLEASE COMMENT with your findings. I would like feedback that these are actually used and useful so I don't keep uploading quants no one is using.
Thanks!
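If you want to check for yourself which quant type the embedding and output tensors use in a given file, one option is the gguf-py package (an assumption on my part; it isn't mentioned on this page). A minimal sketch:
```python
# Minimal sketch: inspect tensor quant types in a downloaded GGUF file.
# Assumes `pip install gguf` and a local copy of one of the _L variants.
from gguf import GGUFReader

reader = GGUFReader("Tiger-Gemma-9B-v3-Q4_K_L.gguf")
for t in reader.tensors:
    # token_embd / output tensors should report Q8_0 for the _L quants
    if "token_embd" in t.name or "output" in t.name:
        print(t.name, t.tensor_type)
```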
## Downloading using huggingface-cli
First, make sure you have huggingface-cli installed:
```
pip install -U "huggingface_hub[cli]"
```
Then, you can target the specific file you want:
```
huggingface-cli download bartowski/Tiger-Gemma-9B-v3-GGUF --include "Tiger-Gemma-9B-v3-Q4_K_M.gguf" --local-dir ./
```
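The same single-file download also works through the Python API, if you prefer it over the CLI. A minimal sketch using huggingface_hub, installed in the step above:
```python
# Minimal sketch: download one quant file via the huggingface_hub Python API.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="bartowski/Tiger-Gemma-9B-v3-GGUF",
    filename="Tiger-Gemma-9B-v3-Q4_K_M.gguf",
    local_dir=".",  # same effect as --local-dir ./
)
print(path)  # local path of the downloaded file
```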
If the model is bigger than 50GB, it will have been split into multiple files. In order to download them all to a local folder, run:
```
huggingface-cli download bartowski/Tiger-Gemma-9B-v3-GGUF --include "Tiger-Gemma-9B-v3-Q8_0/*" --local-dir ./
```
You can either specify a new local-dir (Tiger-Gemma-9B-v3-Q8_0) or download them all in place (./)
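Equivalently in Python, snapshot_download with allow_patterns mirrors the --include flag. A sketch, reusing the split-file Q8_0 example from above:
```python
# Minimal sketch: download a split quant's folder via the Python API.
from huggingface_hub import snapshot_download

snapshot_download(
    repo_id="bartowski/Tiger-Gemma-9B-v3-GGUF",
    allow_patterns=["Tiger-Gemma-9B-v3-Q8_0/*"],  # mirrors --include
    local_dir=".",  # or a new folder, as noted above
)
```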
## Q4_0_X_X
These are *NOT* for Metal (Apple) offloading, only ARM chips.
If you're using an ARM chip, the Q4_0_X_X quants will have a substantial speedup. Check out Q4_0_4_4 speed comparisons [on the original pull request](https://github.com/ggerganov/llama.cpp/pull/5780#pullrequestreview-21657544660)
To check which one would work best for your ARM chip, you can check [AArch64 SoC features](https://gpages.juszkiewicz.com.pl/arm-socs-table/arm-socs.html) (thanks EloyOn!).
## Which file should I choose?
A great write-up with charts showing various performance comparisons is provided by Artefact2 [here](https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9)
The first thing to figure out is how big a model you can run. To do this, you'll need to figure out how much RAM and/or VRAM you have.
If you want your model running as FAST as possible, you'll want to fit the whole thing on your GPU's VRAM. Aim for a quant with a file size 1-2GB smaller than your GPU's total VRAM.
If you want the absolute maximum quality, add both your system RAM and your GPU's VRAM together, then similarly grab a quant with a file size 1-2GB smaller than that total.
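To make that rule of thumb concrete, here is a small illustrative helper (my own sketch, not from the original card; the sizes are copied from the table above and the 2GB headroom is just the suggested margin, not a hard limit):
```python
# Illustrative sketch: pick the largest quant that fits a memory budget
# with the 1-2GB headroom suggested above. Sizes copied from the table.
QUANT_SIZES_GB = {
    "Q8_0": 9.83, "Q6_K_L": 7.81, "Q6_K": 7.59, "Q5_K_L": 6.87,
    "Q5_K_M": 6.65, "Q5_K_S": 6.48, "Q4_K_L": 5.98, "Q4_K_M": 5.76,
    "Q4_K_S": 5.48, "IQ4_XS": 5.18, "Q3_K_L": 5.13, "Q3_K_M": 4.76,
    "IQ3_M": 4.49, "Q3_K_S": 4.34, "IQ3_XS": 4.14, "Q2_K": 3.81,
    "IQ2_M": 3.43,
}

def largest_fitting_quant(memory_gb: float, headroom_gb: float = 2.0):
    """Return the largest quant whose file size fits in memory_gb minus headroom."""
    budget = memory_gb - headroom_gb
    fitting = {q: s for q, s in QUANT_SIZES_GB.items() if s <= budget}
    return max(fitting, key=fitting.get) if fitting else None

print(largest_fitting_quant(8.0))  # for an 8GB card: Q4_K_L at 5.98GB
```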
Next, you'll need to decide if you want to use an 'I-quant' or a 'K-quant'.
If you don't want to think too much, grab one of the K-quants. These are in format 'QX_K_X', like Q5_K_M.
If you want to get more into the weeds, you can check out this extremely useful feature chart:
[llama.cpp feature matrix](https://github.com/ggerganov/llama.cpp/wiki/Feature-matrix)
But basically, if you're aiming for below Q4, and you're running cuBLAS (Nvidia) or rocBLAS (AMD), you should look towards the I-quants. These are in format IQX_X, like IQ3_M. These are newer and offer better performance for their size.
These I-quants can also be used on CPU and Apple Metal, but will be slower than their K-quant equivalent, so speed vs performance is a tradeoff you'll have to decide.
The I-quants are *not* compatible with Vulkan, which also runs on AMD cards, so if you have an AMD card double check if you're using the rocBLAS build or the Vulkan build. At the time of writing this, LM Studio has a preview with ROCm support, and other inference engines have specific builds for ROCm.
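As a quick way to sanity-check a chosen quant against your backend, here is a minimal local-inference sketch with llama-cpp-python — my own addition, since this page only covers llama.cpp itself and LM Studio; build the package against the backend (CUDA, ROCm, Metal, Vulkan) you want to test:
```python
# Minimal sketch: run a downloaded quant locally with llama-cpp-python.
# Assumes `pip install llama-cpp-python` built for your backend.
from llama_cpp import Llama

llm = Llama(
    model_path="Tiger-Gemma-9B-v3-Q4_K_M.gguf",
    n_gpu_layers=-1,  # offload all layers if the backend allows it
    n_ctx=4096,
)
out = llm("Tigers are", max_tokens=32)
print(out["choices"][0]["text"])
```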
## Credits
Thank you kalomaze and Dampf for assistance in creating the imatrix calibration dataset
Thank you ZeroWw for the inspiration to experiment with embed/output
Want to support my work? Visit my ko-fi page here: https://ko-fi.com/bartowski

Tiger-Gemma-9B-v3-IQ2_M.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9fec3e69e25d4101b60ceb101f7c98f9a1af99f25154abc9a15a0bb034d9e5c6
size 3434669248

Tiger-Gemma-9B-v3-IQ3_M.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:411283658b90c3d636b6724e53fc188a2d90a50030676214541edd015b2fd467
size 4494615744

Tiger-Gemma-9B-v3-IQ3_XS.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a7e273c18cba56e0650ba276e0fdfb98584d865aae36bf9449dbe6362c16c51e
size 4144989376

Tiger-Gemma-9B-v3-IQ4_XS.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8df2bdcda7ecf68dcce76ca249fe1d098f459ed34a9797162e5198a51c51a937
size 5183030464

Tiger-Gemma-9B-v3-Q2_K.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8983997345ef38451eef32b62195b99a456ade3f19bd21f844b9c135439821bf
size 3805398208

Tiger-Gemma-9B-v3-Q2_K_L.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0358402a77a63846f327c307590265a09ccdf5da21c2f052663ef4f11647ef2b
size 4027606208

Tiger-Gemma-9B-v3-Q3_K_L.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a80b3bdf0acbb930a3960dff8699c5735262b69d74bbbdc0ecaad51f8f22d59a
size 5132453056

Tiger-Gemma-9B-v3-Q3_K_M.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:df12c0fa9777f858b771a8a1e44702bd7f8084f2587016cc2b8b1c7d126b69b6
size 4761781440

Tiger-Gemma-9B-v3-Q3_K_S.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:74edbede38f707e4d2f58c548e6dc9f3e1023bab830750f706eb150be41c4b94
size 4337665216

Tiger-Gemma-9B-v3-Q3_K_XL.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:907e81648af9d82e3e1b87a93fe7e63d7383aa7ecde3d6cf1de07aede10607e8
size 5354661056

Tiger-Gemma-9B-v3-Q4_0.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1545146a6b2b978d7c768a118bae4289486d0d6da052478263b88dce221af471
size 5459199168

Tiger-Gemma-9B-v3-Q4_0_4_4.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:114bc00e7aaa0567c95c9f738f0bc0536e9dae3c8495ddb399b1b688fab37bd3
size 5443142848

Tiger-Gemma-9B-v3-Q4_0_4_8.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b13a3d1de77a00f54d5a6cbf0d0ae0e3df564a2481b24af9ad9534a6b7630085
size 5443142848

Tiger-Gemma-9B-v3-Q4_0_8_8.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:935f07a30d2fed746634f40d2c75d4256fc81e919f31748948d762a504baaa11
size 5443142848

Tiger-Gemma-9B-v3-Q4_K_L.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5c9f81b9f8942b569ad9464b4a7ff654ba90fc9e3ee4a64d0855f5a9cdbc09f4
size 5983265984

Tiger-Gemma-9B-v3-Q4_K_M.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:62e6411ebf3a76ac2c801b2a41bddb511dd8d0c49973bc088d14d640c2ad2131
size 5761057984

Tiger-Gemma-9B-v3-Q4_K_S.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f393f48a821c5d65dd8e9c30c02a3e052d85fb6e59c37102ccb81c639a1d6961
size 5478925504

Tiger-Gemma-9B-v3-Q5_K_L.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2c3c9e81a39cfec9641299bb49ddd51bbfb7decfe115ae77f104686bd8bd5d15
size 6869574848

Tiger-Gemma-9B-v3-Q5_K_M.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:86ea6f94fec661b82de826978db73d1cb70a6681a0cf8fc3b7000188bb78a43f
size 6647366848

Tiger-Gemma-9B-v3-Q5_K_S.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:304c41386d112ec7c6e656235016597abe00995881c1d98aec2e69e7ad52ebec
size 6483592384

Tiger-Gemma-9B-v3-Q6_K.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8a5004eda31c8b6198aa511dd00bc9bfa9b7a7b54ac7273f503a40085c79a7f5
size 7589070016

Tiger-Gemma-9B-v3-Q6_K_L.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:024e9ac8ffb89cfdb7793cff32d67b4db91024c18a11c39eb20c6a9579d673c8
size 7811278016

Tiger-Gemma-9B-v3-Q8_0.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7da219878e7c5324bead74d98d8988ce54f2a8a1cd9b3e744e1a2a7b6cfa7d5d
size 9827148992

Tiger-Gemma-9B-v3-f16.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1c130376047ee3a780a3a39eef057a096c9af4bacaaa43d831656cab59aa65cc
size 18490680224

Tiger-Gemma-9B-v3.imatrix Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f2d8575be7868c85fa8be0f6bc9eaf8edc129aa0380db807b4b2cd728c37e9ff
size 6116900

configuration.json Normal file

@@ -0,0 +1 @@
{"framework": "pytorch", "task": "text-generation", "allow_remote": true}