Revert "Small fixes for torchao quant" (#2493)

This commit is contained in:
Lianmin Zheng
2024-12-16 15:04:16 -08:00
committed by GitHub
parent 9cd9dc83b3
commit ba36b5520a
2 changed files with 5 additions and 6 deletions

View File

@@ -157,10 +157,6 @@ class ModelRunner:
         self.sampler = Sampler()
         self.load_model()
-        apply_torchao_config_to_model(
-            self.model, global_server_args_dict["torchao_config"]
-        )
-
         # Apply torch TP if the model supports it
         supports_torch_tp = getattr(self.model, "supports_torch_tp", False)
         if self.tp_size > 1 and supports_torch_tp:
@@ -169,6 +165,10 @@ class ModelRunner:
         else:
             self.torch_tp_applied = False

+        apply_torchao_config_to_model(
+            self.model, global_server_args_dict["torchao_config"]
+        )
+
         # Init memory pool and attention backends
         if server_args.lora_paths is not None:
             self.init_lora_manager()