Remove validate_quantized_dora (#1485)
DoRA with quantized layers is supported with PEFT 0.10.0
@@ -242,17 +242,6 @@ class LoraConfig(BaseModel):
                     raise ValueError("Require cfg.load_in_4bit to be True for qlora")
         return self
 
-    @model_validator(mode="before")
-    @classmethod
-    def validate_quantized_dora(cls, data):
-        if data.get("peft_use_dora") and (
-            data.get("load_in_8bit") or data.get("load_in_4bit")
-        ):
-            raise ValueError(
-                "`peft_use_dora` is not currently compatible with quantized weights."
-            )
-        return data
-
 
 class ReLoRAConfig(BaseModel):
     """ReLoRA configuration subset"""
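
For context, a minimal sketch (not part of this commit) of the combination the removed validator used to reject: DoRA applied to a 4-bit quantized base model through PEFT, which is supported as of peft 0.10.0. The model name and LoRA hyperparameters below are placeholders, not values taken from this repository.

# Sketch: DoRA on a 4-bit quantized model, assuming peft>=0.10.0,
# transformers, and bitsandbytes are installed. Names are illustrative.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",  # placeholder base model
    quantization_config=bnb_config,
)

peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=["q_proj", "v_proj"],
    use_dora=True,  # DoRA; works with quantized layers since PEFT 0.10.0
)
model = get_peft_model(model, peft_config)

Because PEFT now handles this combination itself, the config-level check that raised an error for peft_use_dora with load_in_8bit or load_in_4bit is no longer needed.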