support server arg to override KV cache dtype to bf16 to avoid slow cases (#11749)
@@ -1567,6 +1567,8 @@ class ModelRunner:
                 self.kv_cache_dtype = torch.float8_e4m3fnuz
             else:
                 self.kv_cache_dtype = torch.float8_e4m3fn
+        elif self.server_args.kv_cache_dtype in ("bf16", "bfloat16"):
+            self.kv_cache_dtype = torch.bfloat16
         else:
             raise ValueError(
                 f"Unsupported kv_cache_dtype: {self.server_args.kv_cache_dtype}."
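For context, the dtype resolution this hunk extends can be sketched as a small standalone function. Only the fp8_e4m3 and bf16 branches appear in the hunk above; the "auto" and fp8_e5m2 branches and the helper name resolve_kv_cache_dtype are assumptions added here for illustration, not the actual ModelRunner code.

    import torch

    # Minimal sketch of the kv cache dtype mapping, assuming the surrounding
    # branches mirror the hunk above; resolve_kv_cache_dtype is a hypothetical
    # helper, not part of the real implementation.
    def resolve_kv_cache_dtype(kv_cache_dtype: str, model_dtype: torch.dtype) -> torch.dtype:
        if kv_cache_dtype == "auto":
            return model_dtype  # "auto" keeps the model's own dtype
        if kv_cache_dtype == "fp8_e5m2":
            return torch.float8_e5m2
        if kv_cache_dtype == "fp8_e4m3":
            return torch.float8_e4m3fn
        if kv_cache_dtype in ("bf16", "bfloat16"):
            return torch.bfloat16  # new branch added by this commit
        raise ValueError(f"Unsupported kv_cache_dtype: {kv_cache_dtype}.")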
@@ -1652,8 +1652,8 @@ class ServerArgs:
             "--kv-cache-dtype",
             type=str,
             default=ServerArgs.kv_cache_dtype,
-            choices=["auto", "fp8_e5m2", "fp8_e4m3"],
-            help='Data type for kv cache storage. "auto" will use model data type. "fp8_e5m2" and "fp8_e4m3" is supported for CUDA 11.8+.',
+            choices=["auto", "fp8_e5m2", "fp8_e4m3", "bf16", "bfloat16"],
+            help='Data type for kv cache storage. "auto" will use model data type. "bf16" or "bfloat16" for BF16 KV cache. "fp8_e5m2" and "fp8_e4m3" are supported for CUDA 11.8+.',
         )
         parser.add_argument(
             "--enable-fp32-lm-head",
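A minimal way to check the new CLI choice is a plain argparse parser mirroring the argument definition above; this is a standalone reproduction for illustration, not the actual ServerArgs code, and the literal default "auto" stands in for ServerArgs.kv_cache_dtype.

    import argparse

    # Standalone reproduction of the updated --kv-cache-dtype argument.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--kv-cache-dtype",
        type=str,
        default="auto",
        choices=["auto", "fp8_e5m2", "fp8_e4m3", "bf16", "bfloat16"],
        help='Data type for kv cache storage. "auto" will use model data type.',
    )
    args = parser.parse_args(["--kv-cache-dtype", "bf16"])
    print(args.kv_cache_dtype)  # prints "bf16"

With this in place, passing --kv-cache-dtype bf16 (or bfloat16) at server launch forces the KV cache to torch.bfloat16 regardless of the model dtype, which is the override the commit title describes as a way to avoid slow cases.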