fix: small bug for llama-405b fp16 (#733)

This commit is contained in:
Ying Sheng
2024-07-25 21:14:54 -07:00
committed by GitHub
parent 7f6f2f0f09
commit 252e0f7bbd
2 changed files with 2 additions and 1 deletion

View File

@@ -121,7 +121,7 @@ class ModelRunner:
skip_tokenizer_init=True,
)
if is_llama3_405b_fp8(self.model_config):
if is_llama3_405b_fp8(self.model_config) and self.tp_size <= 8:
# A temporary hack to fix the num_heads for meta-llama/Meta-Llama-3.1-405B-FP8 checkpoints
self.model_config.hf_config.num_key_value_heads = 8
vllm_model_config.hf_config.num_key_value_heads = 8

View File

@@ -626,6 +626,7 @@ def is_llama3_405b_fp8(model_config):
and model_config.hf_config.intermediate_size == 53248
and model_config.hf_config.num_hidden_layers == 126
and model_config.hf_config.num_key_value_heads == 16
and hasattr(model_config.hf_config, "quantization_config")
and model_config.hf_config.quantization_config["quant_method"] == "fbgemm_fp8"
):
return True