diff --git a/vllm-v0.6.2/vllm/model_executor/layers/vocab_parallel_embedding.py b/vllm-v0.6.2/vllm/model_executor/layers/vocab_parallel_embedding.py
index 52771f5..776ec33 100644
--- a/vllm-v0.6.2/vllm/model_executor/layers/vocab_parallel_embedding.py
+++ b/vllm-v0.6.2/vllm/model_executor/layers/vocab_parallel_embedding.py
@@ -38,6 +38,9 @@ class UnquantizedEmbeddingMethod(QuantizeMethodBase):
               layer: torch.nn.Module,
               x: torch.Tensor,
               bias: Optional[torch.Tensor] = None) -> torch.Tensor:
+        # MLU F.linear requires matching dtypes
+        if x.dtype != layer.weight.dtype:
+            x = x.to(layer.weight.dtype)
         return F.linear(x, layer.weight, bias)
 
     def embedding(self, layer: torch.nn.Module,
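For context, below is a minimal standalone sketch of the same guard the patch adds: cast the activation to the weight's dtype before calling F.linear so the two dtypes match. The concrete dtypes (bfloat16 weights vs. float16 input) are illustrative assumptions, not values taken from the patch, and the snippet runs on CPU rather than an MLU device.

import torch
import torch.nn.functional as F

# Assumed, illustrative dtype mismatch: bfloat16 weights, float16 activations.
weight = torch.randn(8, 4, dtype=torch.bfloat16)
x = torch.randn(2, 4, dtype=torch.float16)

# Same guard as in UnquantizedEmbeddingMethod.apply: align the input dtype
# with the weight dtype before F.linear.
if x.dtype != weight.dtype:
    x = x.to(weight.dtype)

out = F.linear(x, weight)
print(out.shape, out.dtype)  # torch.Size([2, 8]) torch.bfloat16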