DPO fixes v2 (#1174)

* check for length before trying to remove it

* add validation for sample packing with RLHF
This commit is contained in:
Wing Lian
2024-01-23 12:56:24 -05:00
committed by GitHub
parent 814aee6603
commit 59a31fe613
2 changed files with 5 additions and 1 deletion

View File

@@ -227,7 +227,8 @@ class AxolotlTrainer(Trainer):
     def get_train_dataloader(self) -> DataLoader:
         if self.args.sample_packing and not self.args.pretraining:
             train_dataset = self.train_dataset
-            train_dataset = train_dataset.remove_columns(["length"])
+            if "length" in train_dataset.features.keys():
+                train_dataset = train_dataset.remove_columns(["length"])
             data_collator = self.data_collator
             dataloader_params = {
                 "batch_size": self._train_batch_size,

View File

@@ -204,6 +204,9 @@ def validate_config(cfg):
     if cfg.max_packed_sequence_len:
         raise DeprecationWarning("`max_packed_sequence_len` is no longer supported")
+    if cfg.sample_packing and cfg.rl:
+        raise ValueError("`sample_packing: true` does not work with RLHF training")
+
     if cfg.sample_packing and not cfg.pad_to_sequence_len:
         LOG.warning(
             "`pad_to_sequence_len: true` is recommended when using sample_packing"