diff --git a/src/axolotl/utils/config/models/input/v0_4_1/__init__.py b/src/axolotl/utils/config/models/input/v0_4_1/__init__.py
index 3236bc643..11983acd9 100644
--- a/src/axolotl/utils/config/models/input/v0_4_1/__init__.py
+++ b/src/axolotl/utils/config/models/input/v0_4_1/__init__.py
@@ -179,6 +179,7 @@ class LoraConfig(BaseModel):
     peft_layers_to_transform: Optional[List[int]] = None
     peft: Optional[PeftConfig] = None
     peft_use_dora: Optional[bool] = None
+    peft_use_rslora: Optional[bool] = None

     lora_on_cpu: Optional[bool] = None
     gptq: Optional[bool] = None
diff --git a/src/axolotl/utils/models.py b/src/axolotl/utils/models.py
index 36c9c17e3..53201c996 100644
--- a/src/axolotl/utils/models.py
+++ b/src/axolotl/utils/models.py
@@ -1055,6 +1055,8 @@ def load_lora(model, cfg, inference=False, config_only=False):
             lora_config_kwargs["init_lora_weights"] = "loftq"
         if cfg.peft_use_dora:
             lora_config_kwargs["use_dora"] = cfg.peft_use_dora
+        if cfg.peft_use_rslora:
+            lora_config_kwargs["use_rslora"] = cfg.peft_use_rslora

     lora_config = LoraConfig(
         r=cfg.lora_r,
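
For reference, a minimal sketch of what the new flag does once it reaches PEFT, assuming a PEFT release recent enough to support `use_rslora` on `LoraConfig`. The literal values below are illustrative stand-ins for fields axolotl would read from its parsed YAML config, not taken from any real config:

```python
# Minimal sketch, assuming a PEFT version with `use_rslora` support
# (rank-stabilized LoRA). Hard-coded values stand in for axolotl's cfg.
from peft import LoraConfig

peft_use_rslora = True  # would come from `peft_use_rslora: true` in the YAML

lora_config_kwargs = {}
if peft_use_rslora:
    # rsLoRA scales each adapter by lora_alpha / sqrt(r) instead of
    # lora_alpha / r, which stabilizes training at higher ranks.
    lora_config_kwargs["use_rslora"] = peft_use_rslora

lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    target_modules=["q_proj", "v_proj"],
    **lora_config_kwargs,
)
print(lora_config.use_rslora)  # True
```

Users would enable this with `peft_use_rslora: true` in their axolotl config, mirroring the existing `peft_use_dora` flag the new branch is modeled on.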