From 26cd287cabc04946ab9fa066bf9586aa7558622c Mon Sep 17 00:00:00 2001
From: bursteratom
Date: Wed, 11 Dec 2024 16:15:44 -0500
Subject: [PATCH] switching test hymba order

---
 tests/e2e/test_packing_loss.py | 126 ++++++++++++++++-----------------
 1 file changed, 63 insertions(+), 63 deletions(-)

diff --git a/tests/e2e/test_packing_loss.py b/tests/e2e/test_packing_loss.py
index 43f623ca6..22964be95 100644
--- a/tests/e2e/test_packing_loss.py
+++ b/tests/e2e/test_packing_loss.py
@@ -70,69 +70,6 @@ class TestPackedLlama(unittest.TestCase):
         )
 
 
-class TestPackedHymba(unittest.TestCase):
-    """
-    Test case for Packed training of hymba models
-    """
-
-    @require_torch_2_5_1
-    @with_temp_dir
-    def test_loss_packed(self, temp_dir):
-        # pylint: disable=duplicate-code
-        cfg = DictDefault(
-            {
-                "base_model": "nvidia/Hymba-1.5B-Base",
-                "trust_remote_code": True,
-                "load_in_4bit": True,
-                "adapter": "qlora",
-                "lora_r": 32,
-                "lora_alpha": 16,
-                "lora_dropout": 0.05,
-                "lora_target_modules": [
-                    "gate_proj",
-                    "down_proj",
-                    "up_proj",
-                    "q_proj",
-                    "v_proj",
-                    "k_proj",
-                    "o_proj",
-                ],
-                "sequence_len": 1024,
-                "sample_packing": True,
-                "flash_attention": True,
-                "val_set_size": 0.0,
-                "datasets": [
-                    {
-                        "path": "vicgalle/alpaca-gpt4",
-                        "type": "alpaca",
-                    },
-                ],
-                "num_epochs": 1,
-                "micro_batch_size": 2,
-                "gradient_accumulation_steps": 4,
-                "output_dir": temp_dir,
-                "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
-                "lr_scheduler": "cosine",
-                "max_steps": 5,
-                "use_tensorboard": True,
-            }
-        )
-        if is_torch_bf16_gpu_available():
-            cfg.bf16 = True
-        else:
-            cfg.fp16 = True
-        normalize_config(cfg)
-        cli_args = TrainerCliArgs()
-        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
-
-        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
-
-        check_tensorboard(
-            temp_dir + "/runs", "train/train_loss", 2.0, "Train Loss is too high"
-        )
-
-
 class TestUnpackedHymba(unittest.TestCase):
     """
     Test case for Unpacked training of hymba models
@@ -194,3 +131,66 @@ class TestUnpackedHymba(unittest.TestCase):
         check_tensorboard(
             temp_dir + "/runs", "train/train_loss", 2.0, "Train Loss is too high"
         )
+
+
+class TestPackedHymba(unittest.TestCase):
+    """
+    Test case for Packed training of hymba models
+    """
+
+    @require_torch_2_5_1
+    @with_temp_dir
+    def test_loss_packed(self, temp_dir):
+        # pylint: disable=duplicate-code
+        cfg = DictDefault(
+            {
+                "base_model": "nvidia/Hymba-1.5B-Base",
+                "trust_remote_code": True,
+                "load_in_4bit": True,
+                "adapter": "qlora",
+                "lora_r": 32,
+                "lora_alpha": 16,
+                "lora_dropout": 0.05,
+                "lora_target_modules": [
+                    "gate_proj",
+                    "down_proj",
+                    "up_proj",
+                    "q_proj",
+                    "v_proj",
+                    "k_proj",
+                    "o_proj",
+                ],
+                "sequence_len": 1024,
+                "sample_packing": True,
+                "flash_attention": True,
+                "val_set_size": 0.0,
+                "datasets": [
+                    {
+                        "path": "vicgalle/alpaca-gpt4",
+                        "type": "alpaca",
+                    },
+                ],
+                "num_epochs": 1,
+                "micro_batch_size": 2,
+                "gradient_accumulation_steps": 4,
+                "output_dir": temp_dir,
+                "learning_rate": 0.00001,
+                "optimizer": "adamw_torch",
+                "lr_scheduler": "cosine",
+                "max_steps": 5,
+                "use_tensorboard": True,
+            }
+        )
+        if is_torch_bf16_gpu_available():
+            cfg.bf16 = True
+        else:
+            cfg.fp16 = True
+        normalize_config(cfg)
+        cli_args = TrainerCliArgs()
+        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
+
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+
+        check_tensorboard(
+            temp_dir + "/runs", "train/train_loss", 2.0, "Train Loss is too high"
+        )