From 60861624881ab1e70579a100a8138fcda9aef0fb Mon Sep 17 00:00:00 2001 From: NanoCode012 Date: Fri, 24 Jan 2025 22:07:02 +0700 Subject: [PATCH] chore(doc): improve explanation for *_steps and *_strategy (#2270) --- docs/config.qmd | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/config.qmd b/docs/config.qmd index 179ee9ed1..f253decbe 100644 --- a/docs/config.qmd +++ b/docs/config.qmd @@ -360,10 +360,11 @@ warmup_ratio: 0.05 # cannot use with warmup_steps learning_rate: 0.00003 lr_quadratic_warmup: logging_steps: -eval_steps: # Leave empty to eval at each epoch, integers for every N steps. decimal for fraction of total steps +eval_steps: # Leave empty to eval at each epoch, an integer for every N steps, or a float for a fraction of total steps evals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps -save_strategy: # Set to `"no"` to skip checkpoint saves -save_steps: # Leave empty to save at each epoch +eval_strategy: # Set to `"no"` to skip evaluation, `"epoch"` at end of each epoch, leave empty to infer from `eval_steps`. +save_strategy: # Set to `"no"` to skip checkpoint saves, `"epoch"` at end of each epoch, `"best"` when a better result is achieved, leave empty to infer from `save_steps`. +save_steps: # Leave empty to save at each epoch, an integer for every N steps, or a float for a fraction of total steps saves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps save_total_limit: # Checkpoints saved at a time # Maximum number of iterations to train for. It precedes num_epochs which means that