Fix: Warn when full finetune without adapter (#770)

This commit is contained in:
NanoCode012
2023-10-23 04:41:43 +09:00
committed by GitHub
parent ca84cca2c0
commit 44c9d0151a
2 changed files with 49 additions and 0 deletions

View File

@@ -360,6 +360,12 @@ def validate_config(cfg):
"eval_table_size and eval_sample_packing are not supported together with sample_packing. Please set 'eval_sample_packing' to false."
)
if not cfg.adapter and (cfg.load_in_8bit or cfg.load_in_4bit):
raise ValueError(
"load_in_8bit and load_in_4bit are not supported without setting an adapter."
"If you want to full finetune, please turn off load_in_8bit and load_in_4bit."
)
# TODO
# MPT 7b
# https://github.com/facebookresearch/bitsandbytes/issues/25

View File

@@ -606,3 +606,46 @@ class ValidationTest(unittest.TestCase):
)
validate_config(cfg)
def test_load_in_x_bit_without_adapter(self):
    """Quantized loading (4-bit / 8-bit) must be rejected unless an adapter is configured."""
    expected = r".*load_in_8bit and load_in_4bit are not supported without setting an adapter.*"

    # Without an adapter, each quantized-load flag on its own must raise.
    for flag in ("load_in_4bit", "load_in_8bit"):
        with pytest.raises(
            ValueError,
            match=expected,
        ):
            validate_config(DictDefault({flag: True}))

    # With the matching adapter set, the same flags validate cleanly.
    validate_config(
        DictDefault(
            {
                "load_in_4bit": True,
                "adapter": "qlora",
            }
        )
    )
    validate_config(
        DictDefault(
            {
                "load_in_8bit": True,
                "adapter": "lora",
            }
        )
    )