add support for gradient accumulation steps

Wing Lian
2023-05-30 23:24:37 -04:00
parent c5b0af1a7e
commit 3aad5f3b3e
3 changed files with 34 additions and 3 deletions

scripts/finetune.py

@@ -149,8 +149,10 @@ def train(
             else:
                 cfg[k] = kwargs[k]
 
+    validate_config(cfg)
+
     # setup some derived config / hyperparams
-    cfg.gradient_accumulation_steps = cfg.batch_size // cfg.micro_batch_size
+    cfg.gradient_accumulation_steps = cfg.gradient_accumulation_steps or (cfg.batch_size // cfg.micro_batch_size)
     cfg.world_size = int(os.environ.get("WORLD_SIZE", 1))
     cfg.local_rank = int(os.environ.get("LOCAL_RANK", 0))
     choose_device(cfg)
@@ -168,8 +170,6 @@ def train(
         cfg.fp16 = True
         cfg.bf16 = False
 
-    validate_config(cfg)
-
     # load the tokenizer first
     logging.info("loading tokenizer...")
     tokenizer = load_tokenizer(cfg.base_model_config, cfg.tokenizer_type, cfg)
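After this change, an explicitly configured gradient_accumulation_steps takes precedence, and the batch_size // micro_batch_size derivation is only a fallback for when it is unset. A minimal standalone sketch of that fallback behavior; SimpleNamespace stands in for axolotl's attr-style cfg object here and is an illustration, not the project's actual config class:

from types import SimpleNamespace

def derive_gradient_accumulation(cfg):
    # An explicit gradient_accumulation_steps wins; otherwise derive it
    # from batch_size and micro_batch_size, as in the hunk above.
    cfg.gradient_accumulation_steps = cfg.gradient_accumulation_steps or (
        cfg.batch_size // cfg.micro_batch_size
    )
    return cfg

# Explicit value is kept: stays 4 even though 64 // 8 would give 8.
explicit = derive_gradient_accumulation(
    SimpleNamespace(gradient_accumulation_steps=4, batch_size=64, micro_batch_size=8)
)
assert explicit.gradient_accumulation_steps == 4

# Unset (None) falls back to the derived value: 64 // 8 == 8.
derived = derive_gradient_accumulation(
    SimpleNamespace(gradient_accumulation_steps=None, batch_size=64, micro_batch_size=8)
)
assert derived.gradient_accumulation_steps == 8

Moving validate_config(cfg) ahead of the derivation also means validation sees the raw user-provided values, before gradient_accumulation_steps has been filled in from batch_size.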