From 266ef3f479a905e4c6ca158e401eb47907772665 Mon Sep 17 00:00:00 2001 From: Sunny Liu Date: Mon, 21 Apr 2025 17:17:41 -0400 Subject: [PATCH] skip set_quantization_config if quantization not given --- src/axolotl/utils/models.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/axolotl/utils/models.py b/src/axolotl/utils/models.py index ffdc940fd..4563d6a8f 100644 --- a/src/axolotl/utils/models.py +++ b/src/axolotl/utils/models.py @@ -834,6 +834,8 @@ class ModelLoader: del self.model_kwargs["device_map"] def set_quantization_config(self) -> None: + if not self.cfg.quantization: + return self.model_kwargs["load_in_8bit"] = self.cfg.load_in_8bit self.model_kwargs["load_in_4bit"] = self.cfg.load_in_4bit @@ -887,7 +889,7 @@ class ModelLoader: # but deepspeed needs this still in bfloat16 bnb_config["bnb_4bit_quant_storage"] = torch.float32 - if self.cfg.quantization and self.cfg.quantization.bnb_config_kwargs: + if self.cfg.quantization.bnb_config_kwargs: bnb_config.update(self.cfg.quantization.bnb_config_kwargs) self.model_kwargs["quantization_config"] = BitsAndBytesConfig(