[modelopt] automatically inspect if model is ModelOpt quantized and set quantization method (#5145)
@@ -15,6 +15,7 @@
 import json
 import logging
 import math
+import os
 from enum import IntEnum, auto
 from typing import List, Optional, Set, Union
 
@@ -234,6 +235,20 @@ class ModelConfig:
         if quant_cfg is None:
             # compressed-tensors uses a "compression_config" key
             quant_cfg = getattr(self.hf_config, "compression_config", None)
+        if quant_cfg is None:
+            # check if is modelopt model -- modelopt doesn't have corresponding field
+            # in hf `config.json` but has a standalone `hf_quant_config.json` in the root directory
+            # example: https://huggingface.co/nvidia/Llama-3.1-8B-Instruct-FP8/tree/main
+            is_local = os.path.isdir(self.model_path)
+            modelopt_quant_config = {"quant_method": "modelopt"}
+            if not is_local:
+                from huggingface_hub import HfApi
+
+                hf_api = HfApi()
+                if hf_api.file_exists(self.model_path, "hf_quant_config.json"):
+                    quant_cfg = modelopt_quant_config
+            elif os.path.exists(os.path.join(self.model_path, "hf_quant_config.json")):
+                quant_cfg = modelopt_quant_config
         return quant_cfg
 
     # adapted from https://github.com/vllm-project/vllm/blob/v0.6.4.post1/vllm/config.py
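For context, the detection added by this commit amounts to probing for a standalone hf_quant_config.json next to the model weights, either in a local checkout or in a repo on the Hugging Face Hub. Below is a minimal standalone sketch of that probe; the helper name detect_modelopt_quant is illustrative and not part of the patch, and the real logic lives inside ModelConfig as shown in the diff above.

import os
from typing import Optional

from huggingface_hub import HfApi


def detect_modelopt_quant(model_path: str) -> Optional[dict]:
    # Return a modelopt quant config if hf_quant_config.json is present, else None.
    # Illustrative helper only, mirroring the check introduced in this commit.
    modelopt_quant_config = {"quant_method": "modelopt"}
    if os.path.isdir(model_path):
        # Local checkout: the file sits in the model's root directory.
        if os.path.exists(os.path.join(model_path, "hf_quant_config.json")):
            return modelopt_quant_config
    else:
        # Remote repo id: ask the Hub whether the file exists without downloading anything.
        if HfApi().file_exists(model_path, "hf_quant_config.json"):
            return modelopt_quant_config
    return None


# Example (network access required for the remote case), using the repo cited in the diff comments:
# detect_modelopt_quant("nvidia/Llama-3.1-8B-Instruct-FP8") -> {"quant_method": "modelopt"}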