Fix loading KV quantization scale; Enable modelopt kv cache (#4686)
Co-authored-by: qingquansong <ustcsqq@gmail.com>
@@ -37,11 +37,6 @@ DEFAULT_FP8_MODEL_NAME_FOR_DYNAMIC_QUANT_ACCURACY_TEST = (
 DEFAULT_FP8_MODEL_NAME_FOR_MODELOPT_QUANT_ACCURACY_TEST = (
     "nvidia/Llama-3.1-8B-Instruct-FP8"
 )
-# TODO(yundai424): right now specifying to an older revision since the latest one
-# carries kv cache quantization which doesn't work yet
-DEFAULT_FP8_MODEL_NAME_FOR_MODELOPT_QUANT_ACCURACY_TEST_REVISION = (
-    "13858565416dbdc0b4e7a4a677fadfbd5b9e5bb9"
-)
 
 DEFAULT_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.1-8B-Instruct"
 DEFAULT_SMALL_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.2-1B-Instruct"
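A minimal sketch (not part of this commit) of how the modelopt FP8 test model above could be exercised with the quantized KV cache that this change enables, using sglang's offline Engine API. The argument names (quantization, kv_cache_dtype) and values are assumptions based on sglang's public server arguments; the revision pin is intentionally omitted, since removing that workaround is the point of the diff above.

import sglang as sgl

if __name__ == "__main__":
    # DEFAULT_FP8_MODEL_NAME_FOR_MODELOPT_QUANT_ACCURACY_TEST, loaded at its latest
    # revision now that the pinned-revision workaround has been dropped.
    llm = sgl.Engine(
        model_path="nvidia/Llama-3.1-8B-Instruct-FP8",
        quantization="modelopt",    # ModelOpt FP8 checkpoint format (assumed flag value)
        kv_cache_dtype="fp8_e4m3",  # quantized KV cache, previously unsupported with modelopt
    )
    out = llm.generate(
        "The capital of France is",
        {"temperature": 0, "max_new_tokens": 8},
    )
    print(out)
    llm.shutdown()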