Pretrain multipack (#2278)
* fix for pretraining with packing
* fix model name and expected loss
* make sure to check micro batch size for pretraining
* change loss thresholds based on parametrization
* make tests smaller for CI
* fix pretrain packing
* fix pretrain packing test
* address PR feedback
@@ -1880,6 +1880,8 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
         if training_args.pretraining:
             if self.cfg.pretraining_sample_concatenation is False:
                 return DataCollatorForSeq2Seq(self.tokenizer, **kwargs)
+            if self.cfg.micro_batch_size > 1:
+                return DataCollatorForSeq2Seq(self.tokenizer, **kwargs)
             return None
 
         if self.cfg.model_config_type == "mamba":
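Note: a minimal sketch of the collator selection this hunk ends up with. The function name `pick_pretraining_collator` is invented for illustration; `cfg`, `training_args`, and `kwargs` stand in for the attributes visible above.

    # Illustrative sketch, not the actual axolotl method.
    from transformers import DataCollatorForSeq2Seq

    def pick_pretraining_collator(cfg, training_args, tokenizer, **kwargs):
        if training_args.pretraining:
            if cfg.pretraining_sample_concatenation is False:
                # unconcatenated samples need ordinary seq2seq padding
                return DataCollatorForSeq2Seq(tokenizer, **kwargs)
            if cfg.micro_batch_size > 1:
                # new in this change: micro batches larger than 1 also need
                # padding, since only single-sample packed batches arrive
                # pre-collated
                return DataCollatorForSeq2Seq(tokenizer, **kwargs)
            return None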
@@ -191,7 +191,7 @@ def wrap_pretraining_dataset(
             tokenizer,
             return_tensors="pt",
             padding=True,
-            pad_to_multiple_of=max_tokens * batch_size,
+            pad_to_multiple_of=max_tokens,
             multipack_attn=cfg.pretrain_multipack_attn,
         )
         encode = functools.partial(
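The padding target shrinks from `max_tokens * batch_size` to `max_tokens`. A small worked example of the difference (the concrete values are assumed, not from the diff):

    # How pad_to_multiple_of changes the padded width; values are assumed.
    import math

    def pad(length, multiple):
        return math.ceil(length / multiple) * multiple

    max_tokens, batch_size = 2048, 4
    print(pad(1500, max_tokens * batch_size))  # 8192: old behavior, pads to the full packed width
    print(pad(1500, max_tokens))               # 2048: new behavior, pads per max sequence length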
@@ -201,8 +201,6 @@ def wrap_pretraining_dataset(
             max_seq_length=max_tokens,
             batch_size=batch_size,
             multipack_attn=cfg.pretrain_multipack_attn,
-            group_size=cfg.sample_packing_group_size,
-            bin_size=cfg.sample_packing_bin_size,
         )
         # set this to 1 so downstream data_loader doesn't try to increase the batch again
         cfg.micro_batch_size = 1
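On the `cfg.micro_batch_size = 1` line above: each item a packed pretraining dataset yields is already a full collated batch, so the downstream DataLoader must not batch again. A toy demonstration with made-up tensors (not axolotl's actual loader):

    import torch
    from torch.utils.data import DataLoader

    # one dataset item that is already a collated batch of 4 packed sequences
    prebatched = [{"input_ids": torch.zeros(4, 2048, dtype=torch.long)}]

    loader = DataLoader(prebatched, batch_size=1, collate_fn=lambda items: items[0])
    for batch in loader:
        print(batch["input_ids"].shape)  # torch.Size([4, 2048]), unchanged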
@@ -247,9 +245,7 @@ def encode_packed_pretraining(
     examples: Dict[str, List],
     max_seq_length: int = 2048,
     batch_size: int = 4,
-    multipack_attn: Optional[bool] = False,
-    group_size: int = 100000,
-    bin_size: int = 200,
+    multipack_attn: Optional[bool] = True,
 ) -> Dict[str, List]:
     # pylint: disable=duplicate-code
     # tokenize all the examples
@@ -260,6 +256,9 @@ def encode_packed_pretraining(
         train_dataset,
         max_seq_length,
         skip_position_ids=not multipack_attn,
+        # FIXME using attention mask unpad/pad with trainer and packed pretraining is broken atm
+        # workaround by using the position id logic for now in trainer
+        drop_attention_mask=multipack_attn,
     )
 
     sampler = MultipackBatchSampler(
@@ -267,8 +266,6 @@ def encode_packed_pretraining(
         lengths=get_dataset_lengths(train_dataset),
         batch_size=1,
         batch_max_len=batch_size * max_seq_length,
-        group_size=group_size,
-        bin_size=bin_size,
         drop_last=True,
     )
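For orientation: the sampler above packs variable-length sequences into fixed-capacity bins of `batch_max_len = batch_size * max_seq_length` tokens and emits one packed bin per step (`batch_size=1`). The sketch below is a naive first-fit stand-in for that idea, not MultipackBatchSampler's actual algorithm:

    # Naive first-fit packing; illustrative only.
    def naive_multipack(lengths, batch_max_len):
        bins, current, used = [], [], 0
        for idx, length in enumerate(lengths):
            if used + length > batch_max_len and current:
                bins.append(current)
                current, used = [], 0
            current.append(idx)
            used += length
        return bins  # the trailing partial bin is dropped, as with drop_last=True

    print(naive_multipack([1500, 600, 2048, 100, 1900], batch_max_len=2048))
    # [[0], [1], [2]]; indices 3 and 4 land in the dropped partial bin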
@@ -310,19 +310,22 @@ def process_datasets_for_packing(cfg, train_dataset, eval_dataset):
 
 
 def process_pretraining_datasets_for_packing(
-    train_dataset, sequence_len, skip_position_ids=True
+    train_dataset, sequence_len, skip_position_ids=True, drop_attention_mask=False
 ):
     drop_long = partial(drop_long_seq, sequence_len=sequence_len)
 
     train_dataset = train_dataset.filter(
         drop_long,
         desc="Dropping Long Sequences",
         load_from_cache_file=False,
     )
-    if skip_position_ids:
+    if not skip_position_ids:
         train_dataset = train_dataset.map(
             add_position_ids,
             desc="Add position_id column (Pretraining Sample Packing)",
         )
+    if drop_attention_mask:
+        train_dataset = train_dataset.remove_columns("attention_mask")
 
     return train_dataset
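Reassembled from the hunk above, the post-change function reads as follows; `drop_long_seq` and `add_position_ids` are helpers from the surrounding module that the diff does not show:

    from functools import partial

    def process_pretraining_datasets_for_packing(
        train_dataset, sequence_len, skip_position_ids=True, drop_attention_mask=False
    ):
        drop_long = partial(drop_long_seq, sequence_len=sequence_len)

        train_dataset = train_dataset.filter(
            drop_long,
            desc="Dropping Long Sequences",
            load_from_cache_file=False,
        )
        # the inverted check: position ids are added only when NOT skipped
        if not skip_position_ids:
            train_dataset = train_dataset.map(
                add_position_ids,
                desc="Add position_id column (Pretraining Sample Packing)",
            )
        # new: optionally drop the attention mask (see the FIXME workaround above)
        if drop_attention_mask:
            train_dataset = train_dataset.remove_columns("attention_mask")

        return train_dataset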