diff --git a/README.md b/README.md
index 6b81e69de..d83fdc7fd 100644
--- a/README.md
+++ b/README.md
@@ -411,6 +411,9 @@ logging_steps:
 save_steps:
 eval_steps:
 
+# save model as safetensors (requires the safetensors package)
+save_safetensors:
+
 # whether to mask out or include the human's prompt from the training labels
 train_on_inputs: false
 # don't use this, leads to wonky training (according to someone on the internet)
diff --git a/src/axolotl/utils/trainer.py b/src/axolotl/utils/trainer.py
index 98ff9b3b9..d5697a9fe 100644
--- a/src/axolotl/utils/trainer.py
+++ b/src/axolotl/utils/trainer.py
@@ -182,6 +182,9 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
     training_arguments_kwargs["hub_model_id"] = cfg.hub_model_id
     training_arguments_kwargs["push_to_hub"] = True
 
+    if cfg.save_safetensors:
+        training_arguments_kwargs["save_safetensors"] = cfg.save_safetensors
+
     training_args = AxolotlTrainingArguments(
         per_device_train_batch_size=cfg.micro_batch_size,
         per_device_eval_batch_size=cfg.eval_batch_size