Fix wrong gemm branch cause 250us slower (#7969)
@@ -2193,7 +2193,6 @@ class DeepseekV2ForCausalLM(nn.Module):
             # This may affect the accuracy of fp8 model.
             # Fix deepseek v3 blockwise bmm by using deep_gemm
             use_deep_gemm_bmm = False
-            model_dtype = torch.get_default_dtype()

             if w.dtype in (
                 torch.float8_e4m3fn,
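The line removed above tied the branch choice to `torch.get_default_dtype()`, which follows the serving dtype rather than what the deep_gemm path expects. A minimal sketch of the mismatch, assuming a server running with an fp16 default dtype (the `set_default_dtype` call is only illustrative, not part of the patch):

import torch

# Before the patch, the gemm-branch gate read the process-wide default dtype:
torch.set_default_dtype(torch.float16)      # e.g. serving the model in fp16
model_dtype = torch.get_default_dtype()     # -> torch.float16

# The gate then required bf16, so an fp16 default silently disabled the
# deep_gemm bmm path and fell through to the slower fallback:
print(model_dtype == torch.bfloat16)        # False -> wrong, slower branch taken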
@@ -2219,7 +2218,6 @@ class DeepseekV2ForCausalLM(nn.Module):
                     _is_cuda
                     and weight_block_size[0] == 128
                     and weight_block_size[1] == 128
-                    and model_dtype == torch.bfloat16
                 ):
                     if (
                         deep_gemm_wrapper.ENABLE_JIT_DEEPGEMM
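After this hunk the gate depends only on CUDA availability and the 128x128 block size. A minimal, runnable sketch of the patched branch selection; `_is_cuda`, `weight_block_size`, and the DeepGEMM switch are stubbed here because they come from the surrounding file, and the rest of the DeepGEMM condition is truncated in the diff:

import torch

# Stand-ins for values defined elsewhere in deepseek_v2.py (assumptions, not
# part of this patch): CUDA flag, block size, and the JIT DeepGEMM switch.
_is_cuda = torch.cuda.is_available()
weight_block_size = (128, 128)
enable_jit_deepgemm = False  # mirrors deep_gemm_wrapper.ENABLE_JIT_DEEPGEMM

use_deep_gemm_bmm = False
if (
    _is_cuda
    and weight_block_size[0] == 128
    and weight_block_size[1] == 128
    # the old `and model_dtype == torch.bfloat16` clause is gone, so an fp16
    # default dtype can no longer push the weights onto the slower fallback
):
    if enable_jit_deepgemm:
        use_deep_gemm_bmm = True        # keep blockwise fp8 for deep_gemm bmm
    else:
        dequant_dtype = torch.bfloat16  # explicit bf16; was model_dtype before
else:
    pass  # block_quant_to_tensor_quant fallback, unchanged by this patch

print(use_deep_gemm_bmm)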
@@ -2233,7 +2231,7 @@ class DeepseekV2ForCausalLM(nn.Module):
                             weight,
                             weight_scale,
                             weight_block_size,
-                            model_dtype,
+                            torch.bfloat16,
                         )
                 else:
                     w, scale = block_quant_to_tensor_quant(
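The last hunk pins the `block_quant_dequant` output dtype to `torch.bfloat16` instead of the default dtype. A rough, self-contained sketch of what such a blockwise dequantization does with an explicit output dtype; `block_dequant_sketch` is a hypothetical stand-in, not the real `block_quant_dequant`:

import torch

def block_dequant_sketch(w_fp8, scale, block_size, out_dtype):
    # Hypothetical stand-in for block_quant_dequant: broadcast each 128x128
    # block scale over its block and cast the result to out_dtype.
    bn, bk = block_size
    full = scale.repeat_interleave(bn, dim=0).repeat_interleave(bk, dim=1)
    full = full[: w_fp8.shape[0], : w_fp8.shape[1]]
    return (w_fp8.to(torch.float32) * full).to(out_dtype)

w = torch.randn(256, 512).to(torch.float8_e4m3fn)  # blockwise-quantized weight
s = torch.rand(2, 4)                                # one scale per 128x128 block
# After the patch the target dtype is always bf16, matching the deep_gemm/bf16
# bmm path, regardless of torch.get_default_dtype().
print(block_dequant_sketch(w, s, (128, 128), torch.bfloat16).dtype)  # torch.bfloat16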