fix: use fp16 dtype for sm75 (#1136)

This commit is contained in:
Yineng Zhang
2024-08-17 22:45:42 +08:00
committed by GitHub
parent 5d0d40d0eb
commit 9208591f05

@@ -148,6 +148,11 @@ class ModelRunner:
             f"[gpu={self.gpu_id}] Load weight begin. "
             f"avail mem={get_available_gpu_memory(self.gpu_id):.2f} GB"
         )
+        if torch.cuda.get_device_capability()[0] < 8:
+            logger.info(
+                "Compute capability below sm80. Use float16 due to lack of bfloat16 support."
+            )
+            self.server_args.dtype = "float16"
         monkey_patch_vllm_dummy_weight_loader()
         device_config = DeviceConfig()
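
For context, bfloat16 requires compute capability sm80 (Ampere) or newer; sm75 (Turing, e.g. the T4) only supports float16, which is why the runner downgrades the dtype. Below is a minimal standalone sketch of the same fallback logic; the resolve_dtype helper is hypothetical, while torch.cuda.get_device_capability is the real API used in the diff.

    import logging

    import torch

    logger = logging.getLogger(__name__)


    def resolve_dtype(requested: str = "bfloat16") -> str:
        # Hypothetical helper mirroring the fallback in the diff above.
        # Requires a CUDA device; bfloat16 needs sm80+ (Ampere), while
        # sm75 (Turing, e.g. T4) lacks bfloat16 support entirely.
        major, _minor = torch.cuda.get_device_capability()
        if requested == "bfloat16" and major < 8:
            logger.info(
                "Compute capability below sm80. Use float16 due to lack "
                "of bfloat16 support."
            )
            return "float16"
        return requested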