AMD test fp8 (#4261)

This commit is contained in:
HandH1998
2025-03-11 01:12:09 +08:00
committed by GitHub
parent 5a6400eec5
commit 2ac189edc8
6 changed files with 84 additions and 0 deletions

View File

@@ -237,6 +237,7 @@ class ModelConfig:
"compressed_tensors",
"compressed-tensors",
"fbgemm_fp8",
"w8a8_fp8",
]
optimized_quantization_methods = [
"fp8",

View File

@@ -32,6 +32,10 @@ if _is_cuda:
else:
from sgl_kernel import fp8_scaled_mm
# Input scaling factors are no longer optional in _scaled_mm starting
# from pytorch 2.5. Allocating a dummy tensor to pass as input_scale
TORCH_DEVICE_IDENTITY = torch.ones(1, dtype=torch.float32)
def cutlass_fp8_supported():
if not _is_cuda:

View File

@@ -28,6 +28,10 @@ from sglang.test.run_eval import run_eval
from sglang.utils import get_exception_traceback
DEFAULT_FP8_MODEL_NAME_FOR_TEST = "neuralmagic/Meta-Llama-3.1-8B-FP8"
DEFAULT_FP8_MODEL_NAME_FOR_ACCURACY_TEST = "neuralmagic/Meta-Llama-3-8B-Instruct-FP8"
DEFAULT_FP8_MODEL_NAME_FOR_DYNAMIC_QUANT_ACCURACY_TEST = (
"neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8-dynamic"
)
DEFAULT_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.1-8B-Instruct"
DEFAULT_SMALL_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.2-1B-Instruct"
DEFAULT_MOE_MODEL_NAME_FOR_TEST = "mistralai/Mixtral-8x7B-Instruct-v0.1"