From a546ca2813548d7930b0718c0b4b5b33cf875bc6 Mon Sep 17 00:00:00 2001
From: Wing Lian
Date: Tue, 5 Sep 2023 16:40:13 -0400
Subject: [PATCH] misc fixes/improvements (#513)

fix per pr feedback
---
 src/axolotl/train.py         |  8 +++++---
 src/axolotl/utils/trainer.py | 18 +++++++++++-------
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/src/axolotl/train.py b/src/axolotl/train.py
index 51ef35903..b1be8f8a3 100644
--- a/src/axolotl/train.py
+++ b/src/axolotl/train.py
@@ -88,6 +88,11 @@ def train(
     if peft_config:
         LOG.info(f"Pre-saving adapter config to {cfg.output_dir}")
         peft_config.save_pretrained(cfg.output_dir)
+    # additionally presave the tokenizer and model configs
+    if not Path(cfg.output_dir).is_dir():
+        os.makedirs(cfg.output_dir, exist_ok=True)
+    tokenizer.save_pretrained(str(Path(cfg.output_dir)))
+    model.config.save_pretrained(str(Path(cfg.output_dir)))
 
     # In case we want to stop early with ctrl+c, this is a nice to have to save the pretrained model
     if cfg.local_rank == 0:
@@ -106,9 +111,6 @@
     if cfg.group_by_length:
         LOG.info("hang tight... sorting dataset for group_by_length")
 
-    if not Path(cfg.output_dir).is_dir():
-        os.makedirs(cfg.output_dir, exist_ok=True)
-    tokenizer.save_pretrained(cfg.output_dir)
     if cfg.flash_optimum:
         with torch.backends.cuda.sdp_kernel(
             enable_flash=True, enable_math=True, enable_mem_efficient=True
diff --git a/src/axolotl/utils/trainer.py b/src/axolotl/utils/trainer.py
index c3d6b85cb..3bc283d75 100644
--- a/src/axolotl/utils/trainer.py
+++ b/src/axolotl/utils/trainer.py
@@ -33,6 +33,7 @@ from axolotl.utils.callbacks import (
 )
 from axolotl.utils.collators import DataCollatorForSeq2Seq
 from axolotl.utils.dataloader import MultipackDistributedDataloader
+from axolotl.utils.distributed import is_main_process, zero_first
 from axolotl.utils.schedulers import get_cosine_schedule_with_quadratic_warmup
 
 LOG = logging.getLogger("axolotl")
@@ -375,14 +376,17 @@ def disable_datasets_caching():
 
 def process_datasets_for_packing(cfg, train_dataset, eval_dataset):
     drop_long = partial(drop_long_seq, sequence_len=cfg.sequence_len)
-    train_dataset = train_dataset.filter(drop_long, num_proc=os.cpu_count())
-    if eval_dataset:
-        eval_dataset = eval_dataset.filter(drop_long, num_proc=os.cpu_count())
-
-    if cfg.sample_packing:
-        train_dataset = train_dataset.map(add_position_ids, num_proc=os.cpu_count())
+    with zero_first(is_main_process()):
+        train_dataset = train_dataset.filter(drop_long, num_proc=os.cpu_count())
         if eval_dataset:
-            eval_dataset = eval_dataset.map(add_position_ids, num_proc=os.cpu_count())
+            eval_dataset = eval_dataset.filter(drop_long, num_proc=os.cpu_count())
+
+        if cfg.sample_packing:
+            train_dataset = train_dataset.map(add_position_ids, num_proc=os.cpu_count())
+            if eval_dataset:
+                eval_dataset = eval_dataset.map(
+                    add_position_ids, num_proc=os.cpu_count()
+                )
 
     return train_dataset, eval_dataset
 