Merge pull request #76 from OpenAccess-AI-Collective/truthy-validation
Truthy validation
This commit is contained in:
@@ -364,7 +364,7 @@ def load_lora(model, cfg):
         PeftModel,
     )
 
-    lora_target_modules = list(cfg.lora_target_modules)
+    lora_target_modules = list(cfg.lora_target_modules or [])
 
     if cfg.lora_target_linear:
         bits = None
@@ -5,12 +5,12 @@ def validate_config(cfg):
     if cfg.adapter == "qlora":
         if cfg.merge_lora:
             # can't merge qlora if loaded in 8bit or 4bit
-            assert cfg.load_in_8bit is False
-            assert cfg.load_4bit is False
+            assert cfg.load_in_8bit is not True
+            assert cfg.load_4bit is not True
             assert cfg.load_in_4bit is False
         else:
-            assert cfg.load_in_8bit is False
-            assert cfg.load_4bit is False
+            assert cfg.load_in_8bit is not True
+            assert cfg.load_4bit is not True
             assert cfg.load_in_4bit is True
     if not cfg.load_in_8bit and cfg.adapter == "lora":
         logging.warning("We recommend setting `load_in_8bit: true` for LORA finetuning")
Reference in New Issue
Block a user