commit 9a05fa08fa0b2dea3e365251d9c342ff77ecab9e Author: ModelHub XC Date: Thu Apr 16 16:39:10 2026 +0800 初始化项目,由ModelHub XC社区提供模型 Model: RichardErkhov/ibm_-_PowerMoE-3b-gguf Source: Original Platform diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..433b975 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,57 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.IQ3_XS.gguf filter=lfs diff=lfs 
merge=lfs -text +PowerMoE-3b.IQ3_S.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.IQ3_M.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.Q3_K.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.Q4_0.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.IQ4_NL.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.Q4_K.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.Q4_1.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.Q5_0.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.Q5_K.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.Q5_1.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text +PowerMoE-3b.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text diff --git a/PowerMoE-3b.IQ3_M.gguf b/PowerMoE-3b.IQ3_M.gguf new file mode 100644 index 0000000..af57141 --- /dev/null +++ b/PowerMoE-3b.IQ3_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a88b1cfd0e4f280fa4832e545b3a8cba379a368d97d5c50cc5b9f2618c3f112b +size 1518577632 diff --git a/PowerMoE-3b.IQ3_S.gguf b/PowerMoE-3b.IQ3_S.gguf new file mode 100644 index 0000000..6b15592 --- /dev/null +++ b/PowerMoE-3b.IQ3_S.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c78f6b371e686f59216105bfcd92cf2d7c514f3bf4adc78d0dfd45a0e685611 +size 1488496608 diff --git a/PowerMoE-3b.IQ3_XS.gguf b/PowerMoE-3b.IQ3_XS.gguf new file mode 100644 index 0000000..96c1ac2 --- /dev/null +++ b/PowerMoE-3b.IQ3_XS.gguf @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:f00ca21a6294f1e88460615a976503cc24c0026d292276352fecb89b4675b82b +size 1412999136 diff --git a/PowerMoE-3b.IQ4_NL.gguf b/PowerMoE-3b.IQ4_NL.gguf new file mode 100644 index 0000000..d84d8a3 --- /dev/null +++ b/PowerMoE-3b.IQ4_NL.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c430b1058ae40f49c04277426efcb52744b7dd3f9d47368a4ccd5459fb129b7d +size 1942071264 diff --git a/PowerMoE-3b.IQ4_XS.gguf b/PowerMoE-3b.IQ4_XS.gguf new file mode 100644 index 0000000..119386e --- /dev/null +++ b/PowerMoE-3b.IQ4_XS.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56a671c976c6f52bec741bf7cd9ce65b22c49770fcd8f17614f03abc10c20a20 +size 1842980832 diff --git a/PowerMoE-3b.Q2_K.gguf b/PowerMoE-3b.Q2_K.gguf new file mode 100644 index 0000000..65a9b5d --- /dev/null +++ b/PowerMoE-3b.Q2_K.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:165c4b5112d2cf1b67340e0694f5f442327b31ba74aff12377f3a45a2a568e82 +size 1266132960 diff --git a/PowerMoE-3b.Q3_K.gguf b/PowerMoE-3b.Q3_K.gguf new file mode 100644 index 0000000..5e67a83 --- /dev/null +++ b/PowerMoE-3b.Q3_K.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:806eacfa33d9d157315c89e1c31b01e512001847d9af9d9cc52be13a3cd8a044 +size 1643620320 diff --git a/PowerMoE-3b.Q3_K_L.gguf b/PowerMoE-3b.Q3_K_L.gguf new file mode 100644 index 0000000..d9757a2 --- /dev/null +++ b/PowerMoE-3b.Q3_K_L.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1363fdb9b11c389bd84affdafb47e5ea99a37cabdb973fb90abb0969c631dc9 +size 1773971424 diff --git a/PowerMoE-3b.Q3_K_M.gguf b/PowerMoE-3b.Q3_K_M.gguf new file mode 100644 index 0000000..5e67a83 --- /dev/null +++ b/PowerMoE-3b.Q3_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:806eacfa33d9d157315c89e1c31b01e512001847d9af9d9cc52be13a3cd8a044 +size 1643620320 diff --git a/PowerMoE-3b.Q3_K_S.gguf 
b/PowerMoE-3b.Q3_K_S.gguf new file mode 100644 index 0000000..f7dbfec --- /dev/null +++ b/PowerMoE-3b.Q3_K_S.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:575d3673c5d1c63e727775d952a08882ed2b61dbb0196ab5915925e813ea938c +size 1488496608 diff --git a/PowerMoE-3b.Q4_0.gguf b/PowerMoE-3b.Q4_0.gguf new file mode 100644 index 0000000..b043301 --- /dev/null +++ b/PowerMoE-3b.Q4_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4cd3f8e1fe29a981d4b4807485bb22ea190ef6210ce2d3688b724e954859686 +size 1926342624 diff --git a/PowerMoE-3b.Q4_1.gguf b/PowerMoE-3b.Q4_1.gguf new file mode 100644 index 0000000..2792734 --- /dev/null +++ b/PowerMoE-3b.Q4_1.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd295eec3a798d176effa2964e8847ff936d3fa6181268c4906991c78fff03a0 +size 2132387808 diff --git a/PowerMoE-3b.Q4_K.gguf b/PowerMoE-3b.Q4_K.gguf new file mode 100644 index 0000000..d2e4dc8 --- /dev/null +++ b/PowerMoE-3b.Q4_K.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5008075a5a1292f7b4f82c22dd95bed82f0cc07d81f0eca937aabb74e21628c9 +size 2059347936 diff --git a/PowerMoE-3b.Q4_K_M.gguf b/PowerMoE-3b.Q4_K_M.gguf new file mode 100644 index 0000000..d2e4dc8 --- /dev/null +++ b/PowerMoE-3b.Q4_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5008075a5a1292f7b4f82c22dd95bed82f0cc07d81f0eca937aabb74e21628c9 +size 2059347936 diff --git a/PowerMoE-3b.Q4_K_S.gguf b/PowerMoE-3b.Q4_K_S.gguf new file mode 100644 index 0000000..a2e7a21 --- /dev/null +++ b/PowerMoE-3b.Q4_K_S.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86ba6179da5bf77a435713a5ed95320792236aeb018968b06d0774e14d2cda57 +size 1942464480 diff --git a/PowerMoE-3b.Q5_0.gguf b/PowerMoE-3b.Q5_0.gguf new file mode 100644 index 0000000..8e1a911 --- /dev/null +++ b/PowerMoE-3b.Q5_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:c4e3dd6e70383d0bdd1e5c32706282376007a73a2cab8db7b4a9c255f4ee518c +size 2338432992 diff --git a/PowerMoE-3b.Q5_1.gguf b/PowerMoE-3b.Q5_1.gguf new file mode 100644 index 0000000..8db9248 --- /dev/null +++ b/PowerMoE-3b.Q5_1.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e2f02e924baa8703fe57687d1ae7cb08628e634bdc2e671d2b7be89df566bad +size 2544478176 diff --git a/PowerMoE-3b.Q5_K.gguf b/PowerMoE-3b.Q5_K.gguf new file mode 100644 index 0000000..ac3e97d --- /dev/null +++ b/PowerMoE-3b.Q5_K.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f891309c93148c994ce1ade6ec6ad52196ad5b4aada7a22a2c2d9bb39c7849f6 +size 2406950880 diff --git a/PowerMoE-3b.Q5_K_M.gguf b/PowerMoE-3b.Q5_K_M.gguf new file mode 100644 index 0000000..ac3e97d --- /dev/null +++ b/PowerMoE-3b.Q5_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f891309c93148c994ce1ade6ec6ad52196ad5b4aada7a22a2c2d9bb39c7849f6 +size 2406950880 diff --git a/PowerMoE-3b.Q5_K_S.gguf b/PowerMoE-3b.Q5_K_S.gguf new file mode 100644 index 0000000..c71748e --- /dev/null +++ b/PowerMoE-3b.Q5_K_S.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9530c9e0d8940d9c64eb5ea61413ec5f3baff98e67231c4700b8f7b408136f9c +size 2338432992 diff --git a/PowerMoE-3b.Q6_K.gguf b/PowerMoE-3b.Q6_K.gguf new file mode 100644 index 0000000..d7375a8 --- /dev/null +++ b/PowerMoE-3b.Q6_K.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78897fd226e1318c82b5c4b3c46c7d25a22e5fdfd4f2a9b02ba773ea0fb86b06 +size 2776279008 diff --git a/PowerMoE-3b.Q8_0.gguf b/PowerMoE-3b.Q8_0.gguf new file mode 100644 index 0000000..8f7fdf4 --- /dev/null +++ b/PowerMoE-3b.Q8_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84d77b54030f23bdb402985863c3ef003808d203464244ed4816274ba8380493 +size 3592988640 diff --git a/README.md b/README.md new file mode 100644 index 0000000..76da857 --- /dev/null +++ 
b/README.md @@ -0,0 +1,200 @@ +Quantization made by Richard Erkhov. + +[Github](https://github.com/RichardErkhov) + +[Discord](https://discord.gg/pvy7H8DZMG) + +[Request more models](https://github.com/RichardErkhov/quant_request) + + +PowerMoE-3b - GGUF +- Model creator: https://huggingface.co/ibm/ +- Original model: https://huggingface.co/ibm/PowerMoE-3b/ + + +| Name | Quant method | Size | +| ---- | ---- | ---- | +| [PowerMoE-3b.Q2_K.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.Q2_K.gguf) | Q2_K | 1.18GB | +| [PowerMoE-3b.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.IQ3_XS.gguf) | IQ3_XS | 1.32GB | +| [PowerMoE-3b.IQ3_S.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.IQ3_S.gguf) | IQ3_S | 1.39GB | +| [PowerMoE-3b.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.Q3_K_S.gguf) | Q3_K_S | 1.39GB | +| [PowerMoE-3b.IQ3_M.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.IQ3_M.gguf) | IQ3_M | 1.41GB | +| [PowerMoE-3b.Q3_K.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.Q3_K.gguf) | Q3_K | 1.53GB | +| [PowerMoE-3b.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.Q3_K_M.gguf) | Q3_K_M | 1.53GB | +| [PowerMoE-3b.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.Q3_K_L.gguf) | Q3_K_L | 1.65GB | +| [PowerMoE-3b.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.IQ4_XS.gguf) | IQ4_XS | 1.72GB | +| [PowerMoE-3b.Q4_0.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.Q4_0.gguf) | Q4_0 | 1.79GB | +| [PowerMoE-3b.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.IQ4_NL.gguf) | IQ4_NL | 1.81GB | +| 
[PowerMoE-3b.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.Q4_K_S.gguf) | Q4_K_S | 1.81GB | +| [PowerMoE-3b.Q4_K.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.Q4_K.gguf) | Q4_K | 1.92GB | +| [PowerMoE-3b.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.Q4_K_M.gguf) | Q4_K_M | 1.92GB | +| [PowerMoE-3b.Q4_1.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.Q4_1.gguf) | Q4_1 | 1.99GB | +| [PowerMoE-3b.Q5_0.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.Q5_0.gguf) | Q5_0 | 2.18GB | +| [PowerMoE-3b.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.Q5_K_S.gguf) | Q5_K_S | 2.18GB | +| [PowerMoE-3b.Q5_K.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.Q5_K.gguf) | Q5_K | 2.24GB | +| [PowerMoE-3b.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.Q5_K_M.gguf) | Q5_K_M | 2.24GB | +| [PowerMoE-3b.Q5_1.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.Q5_1.gguf) | Q5_1 | 2.37GB | +| [PowerMoE-3b.Q6_K.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.Q6_K.gguf) | Q6_K | 2.59GB | +| [PowerMoE-3b.Q8_0.gguf](https://huggingface.co/RichardErkhov/ibm_-_PowerMoE-3b-gguf/blob/main/PowerMoE-3b.Q8_0.gguf) | Q8_0 | 3.35GB | + + + + +Original model description: +--- +pipeline_tag: text-generation +inference: false +license: apache-2.0 +library_name: transformers +model-index: +- name: ibm/PowerMoE-3b + results: + - task: + type: text-generation + dataset: + type: lm-eval-harness + name: ARC + metrics: + - name: accuracy-norm + type: accuracy-norm + value: 58.1 + verified: false + - task: + type: text-generation + dataset: + type: lm-eval-harness + name: BoolQ + metrics: + - 
name: accuracy + type: accuracy + value: 65.0 + verified: false + - task: + type: text-generation + dataset: + type: lm-eval-harness + name: Hellaswag + metrics: + - name: accuracy-norm + type: accuracy-norm + value: 71.5 + verified: false + - task: + type: text-generation + dataset: + type: lm-eval-harness + name: OpenBookQA + metrics: + - name: accuracy-norm + type: accuracy-norm + value: 41.0 + verified: false + - task: + type: text-generation + dataset: + type: lm-eval-harness + name: PIQA + metrics: + - name: accuracy-norm + type: accuracy-norm + value: 79.1 + verified: false + - task: + type: text-generation + dataset: + type: lm-eval-harness + name: Winogrande + metrics: + - name: accuracy-norm + type: accuracy-norm + value: 65.0 + verified: false + - task: + type: text-generation + dataset: + type: lm-eval-harness + name: MMLU (5 shot) + metrics: + - name: accuracy + type: accuracy + value: 42.8 + verified: false + - task: + type: text-generation + dataset: + type: lm-eval-harness + name: GSM8k (5 shot) + metrics: + - name: accuracy + type: accuracy + value: 25.9 + verified: false + - task: + type: text-generation + dataset: + type: lm-eval-harness + name: math (4 shot) + metrics: + - name: accuracy + type: accuracy + value: 14.8 + verified: false + - task: + type: text-generation + dataset: + type: bigcode-eval + name: humaneval + metrics: + - name: pass@1 + type: pass@1 + value: 20.1 + verified: false + - task: + type: text-generation + dataset: + type: bigcode-eval + name: MBPP + metrics: + - name: pass@1 + type: pass@1 + value: 32.4 + verified: false +--- + +## Model Summary +PowerMoE-3B is a 3B sparse Mixture-of-Experts (sMoE) language model trained with the Power learning rate scheduler. It sparsely activates 800M parameters for each token. It is trained on a mix of open-source and proprietary datasets. 
PowerMoE-3B has shown promising results compared to other dense models with 2x active parameters across various benchmarks, including natural language multiple-choice tasks, code generation, and math reasoning. +Paper: https://arxiv.org/abs/2408.13359 + +## Usage +Note: Requires installing HF transformers from source. + +### Generation +This is a simple example of how to use the **PowerMoE-3b** model. + +```python +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer +device = "cuda" # or "cpu" +model_path = "ibm/PowerMoE-3b" +tokenizer = AutoTokenizer.from_pretrained(model_path) +# drop device_map if running on CPU +model = AutoModelForCausalLM.from_pretrained(model_path, device_map=device) +model.eval() +# change input text as desired +prompt = "Write a code to find the maximum value in a list of numbers." +# tokenize the text +input_tokens = tokenizer(prompt, return_tensors="pt") +# transfer tokenized inputs to the device +for i in input_tokens: + input_tokens[i] = input_tokens[i].to(device) +# generate output tokens +output = model.generate(**input_tokens, max_new_tokens=100) +# decode output tokens into text +output = tokenizer.batch_decode(output) +# loop over the batch to print, in this example the batch size is 1 +for i in output: + print(i) +``` + + +Additional thanks to @nicoboss for giving me access to his private supercomputer, enabling me to provide many more quants, at much higher speed, than I would otherwise be able to. \ No newline at end of file