Update vllm to v0.4.3 (#511)

Co-authored-by: Qubitium <417764+Qubitium@users.noreply.github.com>
Co-authored-by: ZX <zx@lbx.dev>
Author: Lianmin Zheng
Date: 2024-06-07 12:11:31 -07:00
Committed by: GitHub
Parent: 3bc01ac137
Commit: bf3e271fe0
16 changed files with 44 additions and 12 deletions


@@ -8,6 +8,7 @@ import torch
 import torch.nn.functional as F
 from torch import nn
 from transformers import MixtralConfig
+from vllm.config import CacheConfig
 from vllm.distributed import (
     get_tensor_model_parallel_rank,
     get_tensor_model_parallel_world_size,
@@ -313,6 +314,7 @@ class QuantMixtralForCausalLM(nn.Module):
         self,
         config: MixtralConfig,
         quant_config: Optional[QuantizationConfig] = None,
+        cache_config: Optional[CacheConfig] = None,
     ) -> None:
         super().__init__()
         self.config = config
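
For context: vLLM 0.4.3 extends the model-constructor convention so the engine can pass a `CacheConfig` alongside the quantization config, which is exactly what the two hunks above accommodate. Below is a minimal sketch of the updated signature; storing `cache_config` as an attribute and the `QuantizationConfig` import path are assumptions for illustration, not taken from this diff.

```python
from typing import Optional

from torch import nn
from transformers import MixtralConfig
from vllm.config import CacheConfig
# Assumed import path for QuantizationConfig in vLLM 0.4.x.
from vllm.model_executor.layers.quantization.base_config import QuantizationConfig


class QuantMixtralForCausalLM(nn.Module):
    """Sketch of the vLLM 0.4.3 model-constructor convention."""

    def __init__(
        self,
        config: MixtralConfig,
        quant_config: Optional[QuantizationConfig] = None,
        cache_config: Optional[CacheConfig] = None,  # new in vLLM 0.4.3
    ) -> None:
        super().__init__()
        self.config = config
        self.quant_config = quant_config
        # Assumption: downstream layers (e.g. attention) may consume
        # cache_config; here we simply keep a reference to it.
        self.cache_config = cache_config
```

Accepting the extra keyword argument, even if a model does not use it yet, keeps the constructor compatible with the engine's updated instantiation call in vLLM 0.4.3.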