bump hf deps (#2735) [skip ci]
* bump hf deps
* upgrade liger-kernel too
* install cce from fork for transformers fix
* fix reference to vocab size in gemma3 patch
* use padding_idx instead of pad_token_id
* remove fixed gemma3 patch
* use updated cce fork
* fix local mllama cce patches w docstring
* add test for multipack with trainer setup and fix trainer for trainer refactor upstream
* bump modal version
* guard for iterable datasets
* mllama model arch layout changed in latest transformers
* fix batch sampler with drop_last
* fix: address upstream vlm changes for lora
* fix: update references to old lora target path
* fix: remove mllama fa2 patch due to upstream fix
* fix: lora kernel patch path for multimodal models
* fix: removed mllama from quarto
* run test for came optim on 2.6.0+
* fix fsdp2 patch and remove deprecated patch
* make sure to set sequence_parallel_degree for grpo
* Add SP test for GRPO
* add sp to grpo config for trainer
* use reward_funcs as kwarg to grpo trainer
* fix the comprehension for reward funcs
* reward funcs already passed in as args
* init sp_group right before training
* fix check for adding models to SP context
* make sure to pass args to super
* upgrade deepspeed
* use updated trl and add reasoning flags for vllm
* patch the worker

---------

Co-authored-by: NanoCode012 <nano@axolotl.ai>
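The "guard for iterable datasets" change listed above is not part of the diff shown in this excerpt. As a rough, hypothetical sketch of the idea only (the function name below is an assumption, not the actual patch), a multipack-style batch sampler would only be used for map-style datasets, since streaming datasets have no random access or known length:

from datasets import Dataset, IterableDataset


def supports_multipack_sampler(dataset) -> bool:
    """Hypothetical guard: multipack batch sampling needs random access and a
    known length, which iterable/streaming datasets do not provide."""
    if isinstance(dataset, IterableDataset):
        return False
    return isinstance(dataset, Dataset) or hasattr(dataset, "__len__")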
@@ -6,10 +6,16 @@ from pathlib import Path
from datasets import Dataset, load_dataset
from transformers import AutoTokenizer

from axolotl.cli.args import TrainerCliArgs
from axolotl.common.datasets import load_datasets
from axolotl.datasets import ConstantLengthDataset, TokenizedPromptDataset
from axolotl.prompt_tokenizers import AlpacaPromptTokenizingStrategy
from axolotl.prompters import AlpacaPrompter
from axolotl.train import setup_model_and_trainer
from axolotl.utils.config import normalize_config, validate_config
from axolotl.utils.dict import DictDefault

from tests.e2e.utils import with_temp_dir
from tests.hf_offline_utils import enable_hf_offline

@@ -67,6 +73,85 @@ class TestPacking(unittest.TestCase):
        assert example["position_ids"][next_bos_index] == 0
        assert example["position_ids"][next_bos_index + 1] == 1

    @with_temp_dir
    def test_lora_packing(self, temp_dir):
        # pylint: disable=duplicate-code
        cfg = DictDefault(
            {
                "base_model": "HuggingFaceTB/SmolLM2-135M",
                "tokenizer_type": "AutoTokenizer",
                "sequence_len": 1024,
                "sample_packing": True,
                "multipack_real_batches": False,
                "eval_sample_packing": True,
                "adapter": "lora",
                "lora_r": 32,
                "lora_alpha": 64,
                "lora_dropout": 0.05,
                "lora_target_linear": True,
                "val_set_size": 0.2,
                "special_tokens": {
                    "pad_token": "<|endoftext|>",
                },
                "datasets": [
                    {
                        "path": "mhenrichsen/alpaca_2k_test",
                        "type": "alpaca",
                    },
                ],
                "num_epochs": 1,
                "max_steps": 20,
                "save_steps": 10,
                "micro_batch_size": 8,
                "gradient_accumulation_steps": 1,
                "output_dir": temp_dir,
                "learning_rate": 0.00001,
                "optimizer": "adamw_torch_fused",
                "lr_scheduler": "cosine",
                "fp16": False,
                "bf16": False,
            }
        )

        cfg = validate_config(cfg)
        normalize_config(cfg)
        cli_args = TrainerCliArgs()
        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

        (
            trainer,
            _,
            _,
            _,
            _,
        ) = setup_model_and_trainer(cfg, dataset_meta)

        sampler = trainer._get_eval_sampler(  # pylint: disable=protected-access
            trainer.eval_dataset
        )
        assert "MultipackBatchSampler" in sampler.__class__.__name__
        assert (
            "V2BatchSamplerDataCollatorForSeq2Seq"
            in trainer.eval_data_collator.__class__.__name__
        )
        dataloader = trainer.get_eval_dataloader(trainer.eval_dataset)
        dataloader_iter = iter(dataloader)
        batch = next(dataloader_iter)
        assert batch["input_ids"].shape == (1, 8192)

        sampler = trainer._get_train_sampler(  # pylint: disable=protected-access
            trainer.train_dataset
        )
        assert "MultipackBatchSampler" in sampler.__class__.__name__
        assert (
            "V2BatchSamplerDataCollatorForSeq2Seq"
            in trainer.train_data_collator.__class__.__name__
        )
        dataloader = trainer.get_train_dataloader()
        dataloader_iter = iter(dataloader)
        batch = next(dataloader_iter)
        assert batch["input_ids"].shape == (1, 8192)


if __name__ == "__main__":
    unittest.main()
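A note on the asserted shape: although sequence_len is 1024, sample packing appears to collate up to micro_batch_size sequences into a single packed row, which is why the test expects (1, 8192) rather than (8, 1024). A sketch of the arithmetic, not code from the repo:

# With sample packing, one collated batch holds up to
# micro_batch_size * sequence_len tokens in a single packed row.
micro_batch_size = 8
sequence_len = 1024
packed_row_len = micro_batch_size * sequence_len
assert (1, packed_row_len) == (1, 8192)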