enginex-bi_150-vllm/vllm-llama-head_dim.patch


--- lib64/python3/dist-packages/vllm/model_executor/models/llama.py
+++ lib64/python3/dist-packages/vllm/model_executor/models/llama.py
@@ -128,8 +128,8 @@
             assert tp_size % self.total_num_kv_heads == 0
         self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
         # MistralConfig has an optional head_dim introduced by Mistral-Nemo
-        self.head_dim = getattr(config, "head_dim",
-                                self.hidden_size // self.total_num_heads)
+        def_head_dim = self.hidden_size // self.total_num_heads
+        self.head_dim = getattr(config, "head_dim", def_head_dim) or def_head_dim
         # Phi models introduced a partial_rotary_factor parameter in the config
         partial_rotary_factor = getattr(config, "partial_rotary_factor", 1)
         self.rotary_dim = int(partial_rotary_factor * self.head_dim)
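
Why the change is needed: getattr() only returns its default when the attribute is
absent; if the config defines head_dim but sets it to a falsy value (typically None,
serialized as null in config.json), the pre-patch code assigned None to
self.head_dim, and the later rotary_dim computation int(partial_rotary_factor *
self.head_dim) would raise a TypeError. The patched expression falls back to the
computed default in both cases. The snippet below is a minimal standalone
illustration of that getattr behavior, not part of the patch or of vLLM itself; the
cfg_* names and the example dimensions are hypothetical.

    # Sketch: getattr's default covers a *missing* attribute, not one set to None.
    from types import SimpleNamespace

    hidden_size = 4096
    total_num_heads = 32
    def_head_dim = hidden_size // total_num_heads  # 128

    # Config with no head_dim attribute at all: getattr's default kicks in.
    cfg_missing = SimpleNamespace()
    assert getattr(cfg_missing, "head_dim", def_head_dim) == 128

    # Config with head_dim explicitly set to None: getattr returns None,
    # which the pre-patch code would pass on to the rotary_dim arithmetic.
    cfg_none = SimpleNamespace(head_dim=None)
    assert getattr(cfg_none, "head_dim", def_head_dim) is None

    # The patched expression recovers the computed default in both cases.
    for cfg in (cfg_missing, cfg_none):
        assert (getattr(cfg, "head_dim", def_head_dim) or def_head_dim) == 128

One caveat of the "or" idiom: an explicit head_dim of 0 would also be replaced by
the default. That is harmless here, since 0 is not a meaningful head size.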