Sync from upstream llama.cpp repository
This commit is contained in:
9
requirements/requirements-convert_hf_to_gguf.txt
Normal file
9
requirements/requirements-convert_hf_to_gguf.txt
Normal file
@@ -0,0 +1,9 @@
|
||||
-r ./requirements-convert_legacy_llama.txt
|
||||
--extra-index-url https://download.pytorch.org/whl/cpu
|
||||
|
||||
# Embedding Gemma requires PyTorch 2.6.0 or later
|
||||
torch~=2.6.0; platform_machine != "s390x"
|
||||
|
||||
# torch packages for s390x are only available from nightly builds
|
||||
--extra-index-url https://download.pytorch.org/whl/nightly
|
||||
torch>=0.0.0.dev0; platform_machine == "s390x"
|
||||
Reference in New Issue
Block a user