diff --git a/src/axolotl/core/trainer_builder.py b/src/axolotl/core/trainer_builder.py
index edc842994..62c6a9721 100755
--- a/src/axolotl/core/trainer_builder.py
+++ b/src/axolotl/core/trainer_builder.py
@@ -1880,6 +1880,8 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
         if training_args.pretraining:
             if self.cfg.pretraining_sample_concatenation is False:
                 return DataCollatorForSeq2Seq(self.tokenizer, **kwargs)
+            if self.cfg.micro_batch_size > 1:
+                return DataCollatorForSeq2Seq(self.tokenizer, **kwargs)
             return None
 
         if self.cfg.model_config_type == "mamba":
diff --git a/src/axolotl/utils/data/pretraining.py b/src/axolotl/utils/data/pretraining.py
index c30d62575..f20ced221 100644
--- a/src/axolotl/utils/data/pretraining.py
+++ b/src/axolotl/utils/data/pretraining.py
@@ -191,7 +191,7 @@ def wrap_pretraining_dataset(
             tokenizer,
             return_tensors="pt",
             padding=True,
-            pad_to_multiple_of=max_tokens * batch_size,
+            pad_to_multiple_of=max_tokens,
             multipack_attn=cfg.pretrain_multipack_attn,
         )
         encode = functools.partial(
@@ -201,8 +201,6 @@
             max_seq_length=max_tokens,
             batch_size=batch_size,
             multipack_attn=cfg.pretrain_multipack_attn,
-            group_size=cfg.sample_packing_group_size,
-            bin_size=cfg.sample_packing_bin_size,
         )
         # set this to 1 so downstream data_loader doesn't try to increase the batch again
         cfg.micro_batch_size = 1
@@ -247,9 +245,7 @@ def encode_packed_pretraining(
     examples: Dict[str, List],
     max_seq_length: int = 2048,
     batch_size: int = 4,
-    multipack_attn: Optional[bool] = False,
-    group_size: int = 100000,
-    bin_size: int = 200,
+    multipack_attn: Optional[bool] = True,
 ) -> Dict[str, List]:
     # pylint: disable=duplicate-code
     # tokenize all the examples
@@ -260,6 +256,9 @@
         train_dataset,
         max_seq_length,
         skip_position_ids=not multipack_attn,
+        # FIXME using attention mask unpad/pad with trainer and packed pretraining is broken atm
+        # workaround by using the position id logic for now in trainer
+        drop_attention_mask=multipack_attn,
     )
 
     sampler = MultipackBatchSampler(
@@ -267,8 +266,6 @@
         lengths=get_dataset_lengths(train_dataset),
         batch_size=1,
         batch_max_len=batch_size * max_seq_length,
-        group_size=group_size,
-        bin_size=bin_size,
         drop_last=True,
     )
 
diff --git a/src/axolotl/utils/trainer.py b/src/axolotl/utils/trainer.py
index 34b505ff1..bfd21703d 100644
--- a/src/axolotl/utils/trainer.py
+++ b/src/axolotl/utils/trainer.py
@@ -310,19 +310,22 @@ def process_datasets_for_packing(cfg, train_dataset, eval_dataset):
 
 
 def process_pretraining_datasets_for_packing(
-    train_dataset, sequence_len, skip_position_ids=True
+    train_dataset, sequence_len, skip_position_ids=True, drop_attention_mask=False
 ):
     drop_long = partial(drop_long_seq, sequence_len=sequence_len)
 
     train_dataset = train_dataset.filter(
         drop_long,
         desc="Dropping Long Sequences",
+        load_from_cache_file=False,
     )
-    if skip_position_ids:
+    if not skip_position_ids:
         train_dataset = train_dataset.map(
             add_position_ids,
             desc="Add position_id column (Pretraining Sample Packing)",
         )
+    if drop_attention_mask:
+        train_dataset = train_dataset.remove_columns("attention_mask")
 
     return train_dataset
 
diff --git a/tests/e2e/test_llama_pretrain.py b/tests/e2e/test_llama_pretrain.py
index 117eba25d..c1f024b87 100644
--- a/tests/e2e/test_llama_pretrain.py
+++ b/tests/e2e/test_llama_pretrain.py
@@ -13,7 +13,7 @@ from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault
 
-from .utils import check_model_output_exists
+from .utils import check_model_output_exists, check_tensorboard
 
 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -28,19 +28,25 @@ class TestPretrainLlama:
         "sample_packing",
         [True, False],
     )
-    def test_pretrain(self, temp_dir, sample_packing):
+    @pytest.mark.parametrize(
+        "pretrain_multipack_attn",
+        [True, False],
+    )
+    def test_pretrain(self, temp_dir, sample_packing, pretrain_multipack_attn):
+        if not sample_packing and pretrain_multipack_attn:
+            return
+
         # pylint: disable=duplicate-code
         cfg = DictDefault(
             {
-                "base_model": "JackFram/llama-68m",
-                "tokenizer_type": "LlamaTokenizer",
+                "base_model": "HuggingFaceTB/SmolLM2-135M",
                 "flash_attention": True,
                 "sequence_len": 1024,
                 "sample_packing": sample_packing,
+                "pretrain_multipack_attn": pretrain_multipack_attn,
+                "dataset_processes": 1,
                 "special_tokens": {
-                    "unk_token": "<unk>",
-                    "bos_token": "<s>",
-                    "eos_token": "</s>",
+                    "pad_token": "<|endoftext|>",
                 },
                 "pretraining_dataset": [
                     {
@@ -51,7 +57,7 @@
                 ],
                 "max_steps": 5,
                 "num_epochs": 1,
-                "micro_batch_size": 1,
+                "micro_batch_size": 2,
                 "gradient_accumulation_steps": 1,
                 "val_set_size": 0.0,
                 "output_dir": temp_dir,
@@ -60,6 +66,7 @@
                 "lr_scheduler": "cosine",
                 "save_safetensors": True,
                 "bf16": "auto",
+                "use_tensorboard": True,
             }
         )
         normalize_config(cfg)
@@ -68,3 +75,12 @@
         train(cfg=cfg, dataset_meta=dataset_meta)
 
         check_model_output_exists(temp_dir, cfg)
+        loss_threshold = 3.5
+        if sample_packing and not pretrain_multipack_attn:
+            loss_threshold = 6.5
+        check_tensorboard(
+            temp_dir + "/runs",
+            "train/train_loss",
+            loss_threshold,
+            "Train Loss is too high",
+        )
diff --git a/tests/test_packed_pretraining.py b/tests/test_packed_pretraining.py
index fbb776aa5..9f9ae60fb 100644
--- a/tests/test_packed_pretraining.py
+++ b/tests/test_packed_pretraining.py
@@ -41,6 +41,7 @@ class TestPretrainingPacking(unittest.TestCase):
                 }
             ],
             "sample_packing": True,
+            "pretrain_multipack_attn": True,
             "pad_to_sequence_len": True,
             "sequence_len": 2048,
             "micro_batch_size": 2,
@@ -87,9 +88,11 @@
                 [1, original_bsz * cfg.sequence_len]
             )
             assert data["labels"].shape == torch.Size(
                 [1, original_bsz * cfg.sequence_len]
             )
-            assert data["attention_mask"].shape == torch.Size(
-                [1, original_bsz * cfg.sequence_len]
-            )
+            assert "attention_mask" not in data
+            # FIXME add back once we fix packing unpad/pad with attention mask
+            # assert data["attention_mask"].shape == torch.Size(
+            #     [1, original_bsz * cfg.sequence_len]
+            # )
             idx += 1
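
For context on what the data-side change does: with `pretrain_multipack_attn=True`, the pipeline now adds `position_ids` (because `skip_position_ids=not multipack_attn` evaluates to `False`, which triggers the `add_position_ids` map) and then removes the `attention_mask` column entirely, per the FIXME workaround. Below is a minimal, self-contained sketch of that flow using the Hugging Face `datasets` API; the toy data and the standalone `add_position_ids` helper are illustrative assumptions, not the exact axolotl implementations.

```python
from datasets import Dataset


def add_position_ids(example):
    # Illustrative stand-in for axolotl's add_position_ids: assign
    # per-sample positions so packed sequences can later be delimited
    # by position-id resets instead of an attention mask.
    example["position_ids"] = list(range(len(example["input_ids"])))
    return example


# Toy tokenized pretraining samples (illustrative values only).
ds = Dataset.from_dict(
    {
        "input_ids": [[1, 2, 3], [4, 5]],
        "attention_mask": [[1, 1, 1], [1, 1]],
        "labels": [[1, 2, 3], [4, 5]],
    }
)

sequence_len = 4
# Mirrors the patched flow for pretrain_multipack_attn=True: drop overlong
# samples, add position_ids (skip_position_ids=False), then drop the
# attention_mask column as the FIXME workaround does.
ds = ds.filter(lambda ex: len(ex["input_ids"]) <= sequence_len)
ds = ds.map(add_position_ids)
ds = ds.remove_columns("attention_mask")
print(ds.column_names)  # ['input_ids', 'labels', 'position_ids']
```

This also explains the updated unit test above: once the column is removed here, the collated batches can no longer contain `attention_mask`, hence `assert "attention_mask" not in data`.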
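`check_tensorboard` lives in the e2e test utils and is not part of this diff, so its implementation is not shown. A helper with the same call shape could look roughly like the sketch below, assuming the standard TensorBoard `EventAccumulator` API and that the HF trainer writes event files under `output_dir/runs` when `use_tensorboard` is enabled; the file-globbing logic is an assumption for illustration.

```python
from pathlib import Path

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator


def check_tensorboard(runs_dir: str, tag: str, threshold: float, message: str) -> None:
    # Locate the newest event file the trainer wrote under output_dir/runs.
    event_file = sorted(Path(runs_dir).rglob("events.out.tfevents.*"))[-1]
    acc = EventAccumulator(str(event_file))
    acc.Reload()
    # Compare the last logged scalar for the tag (e.g. "train/train_loss")
    # against the threshold, failing the test with the given message.
    final_value = acc.Scalars(tag)[-1].value
    assert final_value < threshold, f"{message}: {final_value} >= {threshold}"
```

The looser 6.5 threshold for `sample_packing=True` with `pretrain_multipack_attn=False` reflects that the position-id-based packing path is the one expected to converge well; the mask-based path is known-broken per the FIXME, so the test only sanity-checks it.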