commit a6063c410e01758ed671ac03bf6ba3b9ad4620d4
Author: ModelHub XC
Date:   Sun Apr 12 08:34:58 2026 +0800

    Initialize the project; model provided by the ModelHub XC community

    Model: mradermacher/PModel-GGUF
    Source: Original Platform

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..973547b
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,47 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+PModel.IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
+PModel.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+PModel.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+PModel.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+PModel.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+PModel.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+PModel.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+PModel.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+PModel.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+PModel.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+PModel.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+PModel.f16.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/PModel.IQ4_XS.gguf b/PModel.IQ4_XS.gguf
new file mode 100644
index 0000000..66b62cf
--- /dev/null
+++ b/PModel.IQ4_XS.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3d161d277840ef818e6b85f5c65ed9a53edb7cd82f6288b369bb3b3e30c4e6f
+size 82558880
diff --git a/PModel.Q2_K.gguf b/PModel.Q2_K.gguf
new file mode 100644
index 0000000..f2a3717
--- /dev/null
+++ b/PModel.Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39bb349679ed665b67c3faee0519e9792498b43d338c4c60129f1b6fae5f757b
+size 68532128
diff --git a/PModel.Q3_K_L.gguf b/PModel.Q3_K_L.gguf
new file mode 100644
index 0000000..39ef2b5
--- /dev/null
+++ b/PModel.Q3_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9c3e0ac77b512b039e915da925cadcaebe6d82cc482e9c6f225eb42bd51a995
+size 85508000
diff --git a/PModel.Q3_K_M.gguf b/PModel.Q3_K_M.gguf
new file mode 100644
index 0000000..f40680c
--- /dev/null
+++ b/PModel.Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:563bc5ecd43704fd856044ad7d2d0d64ae7558be387df1c8d1fa4b6f722d76c9
+size 81084320
diff --git a/PModel.Q3_K_S.gguf b/PModel.Q3_K_S.gguf
new file mode 100644
index 0000000..37620e0
--- /dev/null
+++ b/PModel.Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:836346cf9ff4c1dd7b1dc30f03c7ea06886c190ff0135dfc6241b81f971e9c1d
+size 73564064
diff --git a/PModel.Q4_K_M.gguf b/PModel.Q4_K_M.gguf
new file mode 100644
index 0000000..2c304db
--- /dev/null
+++ b/PModel.Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e865e5d21c6b5a8e4434c4be93991e06cceeb7d0345bb57dc558c4d046cc347a
+size 91148192
diff --git a/PModel.Q4_K_S.gguf b/PModel.Q4_K_S.gguf
new file mode 100644
index 0000000..1dd4d7d
--- /dev/null
+++ b/PModel.Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:006bb234bcae0b30c037530968c80eae266b4598e7d914646c3e72d87dd22039
+size 85139360
diff --git a/PModel.Q5_K_M.gguf b/PModel.Q5_K_M.gguf
new file mode 100644
index 0000000..3097cd6
--- /dev/null
+++ b/PModel.Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e79cb164036a2308bfeeccebfff0d28eb7f32cf21bf9ccb70612ba61ab401e18
+size 100161440
diff --git a/PModel.Q5_K_S.gguf b/PModel.Q5_K_S.gguf
new file mode 100644
index 0000000..e015e48
--- /dev/null
+++ b/PModel.Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffc2bf72511a667564fdf03dd809595ad167de261d1befc97ee6307979f4c6a4
+size 95461280
diff --git a/PModel.Q6_K.gguf b/PModel.Q6_K.gguf
new file mode 100644
index 0000000..9b03145
--- /dev/null
+++ b/PModel.Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31ab2f6736c1bab929f90d13c5073d029e017fd44d96450069992954e24ce0bd
+size 106741664
diff --git a/PModel.Q8_0.gguf b/PModel.Q8_0.gguf
new file mode 100644
index 0000000..3891a0e
--- /dev/null
+++ b/PModel.Q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d794aa30a6be5c6232a47618ff9cf9619398661f89b0720c07191440f542c21
+size 136659584
diff --git a/PModel.f16.gguf b/PModel.f16.gguf
new file mode 100644
index 0000000..c5e5f6c
--- /dev/null
+++ b/PModel.f16.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26c3bb45ec185018ab3112c718263bfe0f5d20c2592e4201682f6ae4ddb43f48
+size 252470848
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..b03fbd5
--- /dev/null
+++ b/README.md
@@ -0,0 +1,70 @@
+---
+base_model: gray567/PModel
+language: en
+library_name: transformers
+license: mit
+mradermacher:
+  readme_rev: 1
+quantized_by: mradermacher
+tags:
+- exbert
+---
+## About
+
+static quants of https://huggingface.co/gray567/PModel
+
+***For a convenient overview and download list, visit our [model page for this model](https://hf.tst.eu/model#PModel-GGUF).***
+
+weighted/imatrix quants are available at https://huggingface.co/mradermacher/PModel-i1-GGUF
+
+## Usage
+
+If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including how to concatenate multi-part files.
+
+## Provided Quants
+
+(sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants)
+
+| Link | Type | Size/GB | Notes |
+|:-----|:-----|--------:|:------|
+| [GGUF](https://huggingface.co/mradermacher/PModel-GGUF/resolve/main/PModel.Q2_K.gguf) | Q2_K | 0.2 | |
+| [GGUF](https://huggingface.co/mradermacher/PModel-GGUF/resolve/main/PModel.Q3_K_S.gguf) | Q3_K_S | 0.2 | |
+| [GGUF](https://huggingface.co/mradermacher/PModel-GGUF/resolve/main/PModel.Q3_K_M.gguf) | Q3_K_M | 0.2 | lower quality |
+| [GGUF](https://huggingface.co/mradermacher/PModel-GGUF/resolve/main/PModel.IQ4_XS.gguf) | IQ4_XS | 0.2 | |
+| [GGUF](https://huggingface.co/mradermacher/PModel-GGUF/resolve/main/PModel.Q4_K_S.gguf) | Q4_K_S | 0.2 | fast, recommended |
+| [GGUF](https://huggingface.co/mradermacher/PModel-GGUF/resolve/main/PModel.Q3_K_L.gguf) | Q3_K_L | 0.2 | |
+| [GGUF](https://huggingface.co/mradermacher/PModel-GGUF/resolve/main/PModel.Q4_K_M.gguf) | Q4_K_M | 0.2 | fast, recommended |
+| [GGUF](https://huggingface.co/mradermacher/PModel-GGUF/resolve/main/PModel.Q5_K_S.gguf) | Q5_K_S | 0.2 | |
+| [GGUF](https://huggingface.co/mradermacher/PModel-GGUF/resolve/main/PModel.Q5_K_M.gguf) | Q5_K_M | 0.2 | |
+| [GGUF](https://huggingface.co/mradermacher/PModel-GGUF/resolve/main/PModel.Q6_K.gguf) | Q6_K | 0.2 | very good quality |
+| [GGUF](https://huggingface.co/mradermacher/PModel-GGUF/resolve/main/PModel.Q8_0.gguf) | Q8_0 | 0.2 | fast, best quality |
+| [GGUF](https://huggingface.co/mradermacher/PModel-GGUF/resolve/main/PModel.f16.gguf) | f16 | 0.4 | 16 bpw, overkill |
+
+Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better):
+
+![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png)
+
+And here are Artefact2's thoughts on the matter:
+https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9
+
+## FAQ / Model Request
+
+See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized.
+
+## Thanks
+
+I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time.
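
For readers who want a concrete starting point beyond the READMEs linked in the Usage section of the model card above, here is a minimal sketch of downloading and running one of the listed quants. It assumes the `huggingface_hub` and `llama-cpp-python` packages, which are not mentioned anywhere in this repository; any other llama.cpp-compatible runtime can load the same GGUF file. `PModel.Q4_K_M.gguf` is chosen only because the table marks it "fast, recommended", and the prompt and generation parameters are placeholders rather than values from the model card.

```python
# Minimal sketch, assuming `pip install huggingface_hub llama-cpp-python`.
# Any llama.cpp-compatible runtime works the same way with these GGUF files.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Fetch one of the quants listed in the table above (Q4_K_M: "fast, recommended").
model_path = hf_hub_download(
    repo_id="mradermacher/PModel-GGUF",
    filename="PModel.Q4_K_M.gguf",
)

# Load the GGUF file and run a short completion. n_ctx, the prompt, and
# max_tokens are illustrative choices, not values from the model card.
llm = Llama(model_path=model_path, n_ctx=2048)
out = llm("Hello, world:", max_tokens=32)
print(out["choices"][0]["text"])
```

None of the quants in this repository are split into multiple parts, so no concatenation step is needed here; for multi-part GGUFs, follow the concatenation instructions in the READMEs linked above.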