Support server-arg override of KV cache dtype to bf16 to avoid slow cases (#11749)

This commit is contained in:
b8zhong
2025-10-18 11:49:48 -07:00
committed by GitHub
parent a93f10a722
commit f9a7d9b3dc
3 changed files with 5 additions and 3 deletions

View File

@@ -1567,6 +1567,8 @@ class ModelRunner:
self.kv_cache_dtype = torch.float8_e4m3fnuz
else:
self.kv_cache_dtype = torch.float8_e4m3fn
elif self.server_args.kv_cache_dtype in ("bf16", "bfloat16"):
self.kv_cache_dtype = torch.bfloat16
else:
raise ValueError(
f"Unsupported kv_cache_dtype: {self.server_args.kv_cache_dtype}."

View File

@@ -1652,8 +1652,8 @@ class ServerArgs:
"--kv-cache-dtype",
type=str,
default=ServerArgs.kv_cache_dtype,
choices=["auto", "fp8_e5m2", "fp8_e4m3"],
help='Data type for kv cache storage. "auto" will use model data type. "fp8_e5m2" and "fp8_e4m3" is supported for CUDA 11.8+.',
choices=["auto", "fp8_e5m2", "fp8_e4m3", "bf16", "bfloat16"],
help='Data type for kv cache storage. "auto" will use model data type. "bf16" or "bfloat16" for BF16 KV cache. "fp8_e5m2" and "fp8_e4m3" are supported for CUDA 11.8+.',
)
parser.add_argument(
"--enable-fp32-lm-head",