fix eval_steps to be a sane default (#797)

* fix eval_steps to be a sane default

* update docs for fractional eval_steps
Author: Wing Lian
Date: 2023-10-27 22:36:30 -04:00
Committed by: GitHub
Parent: 0800885e2f
Commit: 8b79ff0e94
23 changed files with 36 additions and 36 deletions

View File

@@ -618,14 +618,14 @@ gradient_accumulation_steps: 1
# The number of samples to include in each batch. This is the number of samples sent to each GPU.
micro_batch_size: 2
eval_batch_size:
-num_epochs: 3
+num_epochs: 4
warmup_steps: 100
learning_rate: 0.00003
lr_quadratic_warmup:
logging_steps:
save_strategy: # Set to `no` to skip checkpoint saves
save_steps: # Leave empty to save at each epoch
-eval_steps: # Leave empty to eval at each epoch
+eval_steps: # Leave empty to eval at each epoch, an integer to eval every N steps, or a decimal for a fraction of total steps
save_total_limit: # Checkpoints saved at a time
# Maximum number of iterations to train for. It precedes num_epochs which means that
# if both are set, num_epochs will not be guaranteed.
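
The comment added above documents the new eval_steps semantics: leave it empty to evaluate once per epoch, set an integer to evaluate every N steps, or set a decimal to evaluate at a fraction of the total training steps. axolotl passes eval_steps through to the Hugging Face TrainingArguments, and recent transformers releases interpret a value below 1 as a ratio of total training steps. A minimal sketch of that resolution follows, assuming that behaviour; resolve_eval_steps is an illustrative name, not a function from either codebase.

# Illustrative helper (not from axolotl or transformers) showing how a
# fractional eval_steps value resolves to a concrete evaluation interval.
def resolve_eval_steps(eval_steps, total_training_steps):
    """Return the evaluation interval in steps, or None for per-epoch eval."""
    if eval_steps is None:
        return None                       # empty -> evaluate at each epoch
    if eval_steps >= 1:
        return int(eval_steps)            # integer -> evaluate every N steps
    # decimal below 1 -> a fraction of the whole run
    return max(1, int(total_training_steps * eval_steps))

# eval_steps: 0.05 on a 1,000-step run evaluates every 50 steps,
# i.e. about 20 evaluations however long the run is.
assert resolve_eval_steps(0.05, 1_000) == 50
assert resolve_eval_steps(20, 1_000) == 20
assert resolve_eval_steps(None, 1_000) is None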

View File

@@ -49,7 +49,7 @@ flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 10
-eval_steps: 20
+eval_steps: 0.05
save_steps:
debug:
deepspeed:
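
Swapping the fixed eval_steps: 20 for 0.05 here (and in the remaining example configs below) ties the evaluation cadence to the length of the run: 0.05 works out to roughly 20 evaluations per training run regardless of dataset size, whereas a hard-coded 20-step interval means hundreds of evaluations on long runs and few or none on very short ones. That scale-invariance is presumably the "sane default" the commit title refers to.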

View File

@@ -34,7 +34,7 @@ wandb_log_model:
gradient_accumulation_steps: 4
micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
@@ -54,7 +54,7 @@ xformers_attention:
flash_attention: true
warmup_steps: 10
-eval_steps: 20
+eval_steps: 0.05
save_steps:
debug:
deepspeed:

View File

@@ -36,7 +36,7 @@ wandb_log_model:
gradient_accumulation_steps: 4
micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
optimizer: paged_adamw_32bit
lr_scheduler: cosine
learning_rate: 0.0002
@@ -56,7 +56,7 @@ xformers_attention:
flash_attention: true
warmup_steps: 10
-eval_steps: 20
+eval_steps: 0.05
save_steps:
debug:
deepspeed:

View File

@@ -34,7 +34,7 @@ wandb_log_model:
gradient_accumulation_steps: 4
micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
@@ -54,7 +54,7 @@ xformers_attention:
flash_attention: true
warmup_steps: 10
-eval_steps: 20
+eval_steps: 0.05
save_steps:
debug:
deepspeed:

View File

@@ -36,7 +36,7 @@ wandb_log_model:
gradient_accumulation_steps: 4
micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
optimizer: paged_adamw_32bit
lr_scheduler: cosine
learning_rate: 0.0002
@@ -56,7 +56,7 @@ xformers_attention:
flash_attention: true
warmup_steps: 10
-eval_steps: 20
+eval_steps: 0.05
save_steps:
debug:
deepspeed:

View File

@@ -34,7 +34,7 @@ wandb_log_model:
gradient_accumulation_steps: 4
micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
@@ -54,7 +54,7 @@ xformers_attention:
flash_attention: true
warmup_steps: 10
-eval_steps: 20
+eval_steps: 0.05
save_steps:
debug:
deepspeed:

View File

@@ -36,7 +36,7 @@ wandb_log_model:
gradient_accumulation_steps: 4
micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
optimizer: paged_adamw_32bit
lr_scheduler: cosine
learning_rate: 0.0002
@@ -56,7 +56,7 @@ xformers_attention:
flash_attention: true
warmup_steps: 10
-eval_steps: 20
+eval_steps: 0.05
save_steps:
debug:
deepspeed:

View File

@@ -53,7 +53,7 @@ output_dir: ./qlora-out
# decrease if OOM, increase for max VRAM utilization
micro_batch_size: 1
gradient_accumulation_steps: 2
-num_epochs: 3
+num_epochs: 4
# Optimizer for QLoRA
optimizer: paged_adamw_32bit
torchdistx_path:

View File

@@ -46,7 +46,7 @@ flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 10
-eval_steps: 20
+eval_steps: 0.05
save_steps:
debug:
deepspeed:

View File

@@ -24,7 +24,7 @@ wandb_log_model:
output_dir: ./jeopardy-bot-7b
gradient_accumulation_steps: 1
micro_batch_size: 1
-num_epochs: 3
+num_epochs: 4
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine

View File

@@ -37,7 +37,7 @@ wandb_log_model:
output_dir: ./model-out
gradient_accumulation_steps: 1
micro_batch_size: 1
-num_epochs: 3
+num_epochs: 4
optimizer: adamw_torch
adam_beta2: 0.95
adam_eps: 0.00001

View File

@@ -34,7 +34,7 @@ wandb_log_model:
gradient_accumulation_steps: 4
micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
@@ -54,7 +54,7 @@ xformers_attention:
flash_attention: true
warmup_steps: 10
-eval_steps: 20
+eval_steps: 0.05
eval_table_size:
eval_table_max_new_tokens: 128
save_steps:

View File

@@ -36,7 +36,7 @@ wandb_log_model:
gradient_accumulation_steps: 4
micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
optimizer: paged_adamw_32bit
lr_scheduler: cosine
learning_rate: 0.0002
@@ -56,7 +56,7 @@ xformers_attention:
flash_attention: true
warmup_steps: 10
-eval_steps: 20
+eval_steps: 0.05
eval_table_size:
save_steps:
debug:

View File

@@ -40,7 +40,7 @@ wandb_log_model:
gradient_accumulation_steps: 4
micro_batch_size: 4
-num_epochs: 3
+num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
@@ -60,7 +60,7 @@ xformers_attention:
flash_attention: true
warmup_steps: 10
-eval_steps: 20
+eval_steps: 0.05
save_steps: 50
debug:
deepspeed:

View File

@@ -34,7 +34,7 @@ wandb_log_model:
gradient_accumulation_steps: 4
micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
@@ -54,7 +54,7 @@ xformers_attention:
flash_attention: true
warmup_steps: 10
-eval_steps: 20
+eval_steps: 0.05
eval_table_size:
save_steps:
debug:

View File

@@ -26,7 +26,7 @@ wandb_log_model:
gradient_accumulation_steps: 4
micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.000005
@@ -46,7 +46,7 @@ xformers_attention:
flash_attention: true
warmup_steps: 10
-eval_steps: 20
+eval_steps: 0.05
eval_table_size:
eval_table_max_new_tokens: 128
save_steps:

View File

@@ -63,7 +63,7 @@ xformers_attention:
flash_attention: true
warmup_steps: 10
-eval_steps: 20
+eval_steps: 0.05
eval_table_size:
eval_table_max_new_tokens: 128
save_steps:

View File

@@ -26,7 +26,7 @@ wandb_log_model:
output_dir: ./mpt-alpaca-7b
gradient_accumulation_steps: 1
micro_batch_size: 1
-num_epochs: 3
+num_epochs: 4
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine

View File

@@ -23,7 +23,7 @@ wandb_log_model:
output_dir: ./lora-alpaca-pythia
gradient_accumulation_steps: 1
micro_batch_size: 4
-num_epochs: 3
+num_epochs: 4
learning_rate: 0.00001
train_on_inputs: false
group_by_length: false
@@ -33,5 +33,5 @@ early_stopping_patience:
resume_from_checkpoint:
local_rank:
weight_decay: 0.1
-eval_steps: 20
+eval_steps: 0.05
logging_steps: 1

View File

@@ -27,7 +27,7 @@ wandb_log_model:
output_dir: ./redpajama-alpaca-3b
batch_size: 4
micro_batch_size: 1
-num_epochs: 3
+num_epochs: 4
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine

View File

@@ -26,7 +26,7 @@ wandb_log_model:
output_dir: ./lora-replit
batch_size: 8
micro_batch_size: 1
-num_epochs: 3
+num_epochs: 4
optimizer:
torchdistx_path:
lr_scheduler:

View File

@@ -51,7 +51,7 @@ output_dir: ./qlora-out
# decrease if OOM, increase for max VRAM utilization
micro_batch_size: 1
gradient_accumulation_steps: 1
-num_epochs: 3
+num_epochs: 4
# Optimizer for QLoRA
optimizer: paged_adamw_32bit
torchdistx_path: