Merge pull request #176 from NanoCode012/fix/peft-import

Fix backward compatibility for peft imports
This commit is contained in:
NanoCode012
2023-06-10 07:56:35 +09:00
committed by GitHub

View File

@@ -140,12 +140,18 @@ def load_model(
)
replace_peft_model_with_int4_lora_model()
else:
from peft import prepare_model_for_kbit_training
except Exception as err:
logging.exception(err)
raise err
try:
from peft import prepare_model_for_kbit_training
except ImportError:
# For backward compatibility
from peft import (
prepare_model_for_int8_training as prepare_model_for_kbit_training,
)
model_kwargs = {}
if cfg.adapter == "qlora" and cfg.load_in_4bit:
model_kwargs["quantization_config"] = BitsAndBytesConfig(