ignore issues with calculating # params when printing (#1493)

This commit is contained in:
Wing Lian
2024-04-08 11:04:22 -04:00
committed by GitHub
parent 9430b6e868
commit 2fa65b9599

View File

@@ -902,7 +902,12 @@ def load_lora(model, cfg, inference=False, config_only=False):
         model = get_peft_model(model, lora_config)
     if rank == 0:
-        model.print_trainable_parameters()
+        try:
+            model.print_trainable_parameters()
+        except AttributeError as exc:
+            LOG.warning(
+                "Exception caught during model.print_trainable_parameters(): %s", exc
+            )
     elif cfg.fsdp and cfg.adapter == "qlora":
         setup_quantized_peft_meta_for_training(model)