Remove validate_quantized_dora (#1485)

DoRA with quantized layers has been supported since PEFT 0.10.0, so this validator is no longer needed.
This commit is contained in:
xzuyn
2024-04-08 01:25:23 -04:00
committed by GitHub
parent 934fc851da
commit 9430b6e868

View File

@@ -242,17 +242,6 @@ class LoraConfig(BaseModel):
raise ValueError("Require cfg.load_in_4bit to be True for qlora")
return self
@model_validator(mode="before")
@classmethod
def validate_quantized_dora(cls, data):
    """Reject configurations that combine DoRA with quantized weights.

    Raises:
        ValueError: if ``peft_use_dora`` is set together with either
            ``load_in_8bit`` or ``load_in_4bit``.
    """
    wants_dora = data.get("peft_use_dora")
    is_quantized = data.get("load_in_8bit") or data.get("load_in_4bit")
    if wants_dora and is_quantized:
        # DoRA + 8-bit/4-bit quantization is an unsupported combination here.
        raise ValueError(
            "`peft_use_dora` is not currently compatible with quantized weights."
        )
    return data
class ReLoRAConfig(BaseModel):
"""ReLoRA configuration subset"""