Merge branch 'main' into refactor/rename-4b-to-gptq

This commit is contained in:
Wing Lian
2023-05-27 09:37:52 -04:00
committed by GitHub
5 changed files with 141 additions and 12 deletions

View File

@@ -364,7 +364,7 @@ def load_lora(model, cfg):
         PeftModel,
     )
-    lora_target_modules = list(cfg.lora_target_modules)
+    lora_target_modules = list(cfg.lora_target_modules or [])
     if cfg.lora_target_linear:
         bits = None

View File

@@ -8,12 +8,12 @@ def validate_config(cfg):
     if cfg.adapter == "qlora":
         if cfg.merge_lora:
             # can't merge qlora if loaded in 8bit or 4bit
-            assert cfg.load_in_8bit is False
-            assert cfg.gptq is False
-            assert cfg.load_in_4bit is False
+            assert cfg.load_in_8bit is not True
+            assert cfg.gptq is not True
+            assert cfg.load_in_4bit is not True
         else:
-            assert cfg.load_in_8bit is False
-            assert cfg.gptq is False
+            assert cfg.load_in_8bit is not True
+            assert cfg.gptq is not True
             assert cfg.load_in_4bit is True
     if not cfg.load_in_8bit and cfg.adapter == "lora":