From 9208591f05c39963f423fb3fee841f94276da187 Mon Sep 17 00:00:00 2001 From: Yineng Zhang Date: Sat, 17 Aug 2024 22:45:42 +0800 Subject: [PATCH] fix: use fp16 dtype for sm75 (#1136) --- python/sglang/srt/model_executor/model_runner.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/python/sglang/srt/model_executor/model_runner.py b/python/sglang/srt/model_executor/model_runner.py index 2de432144..b74a19e60 100644 --- a/python/sglang/srt/model_executor/model_runner.py +++ b/python/sglang/srt/model_executor/model_runner.py @@ -148,6 +148,11 @@ class ModelRunner: f"[gpu={self.gpu_id}] Load weight begin. " f"avail mem={get_available_gpu_memory(self.gpu_id):.2f} GB" ) + if torch.cuda.get_device_capability()[0] < 8: + logger.info( + "Compute capability below sm80. Use float16 due to lack of bfloat16 support." + ) + self.server_args.dtype = "float16" monkey_patch_vllm_dummy_weight_loader() device_config = DeviceConfig()