diff --git a/tests/e2e/multigpu/test_llama.py b/tests/e2e/multigpu/test_llama.py
index c9938a155..cad043801 100644
--- a/tests/e2e/multigpu/test_llama.py
+++ b/tests/e2e/multigpu/test_llama.py
@@ -9,6 +9,7 @@ from pathlib import Path
 import pytest
 import yaml
 from accelerate.test_utils import execute_subprocess_async
+from e2e.utils import check_tensorboard
 from huggingface_hub import snapshot_download
 from transformers.testing_utils import get_torch_dist_unique_port
 
@@ -53,7 +54,7 @@ class TestMultiGPULlama:
                     },
                 ],
                 "num_epochs": 1,
-                "max_steps": 15,
+                "max_steps": 5,
                 "micro_batch_size": 4,
                 "gradient_accumulation_steps": 4,
                 "output_dir": temp_dir,
@@ -61,6 +62,7 @@ class TestMultiGPULlama:
                 "optimizer": "adamw_8bit",
                 "lr_scheduler": "cosine",
                 "flash_attention": True,
+                "use_tensorboard": True,
             }
         )
 
@@ -83,6 +85,10 @@ class TestMultiGPULlama:
             ]
         )
 
+        check_tensorboard(
+            temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
+        )
+
     @pytest.mark.parametrize(
         "gradient_accumulation_steps",
         [1, 4],
@@ -112,7 +118,7 @@ class TestMultiGPULlama:
                     },
                 ],
                 "num_epochs": 1,
-                "max_steps": 15,
+                "max_steps": 5,
                 "micro_batch_size": 4,
                 "gradient_accumulation_steps": gradient_accumulation_steps,
                 "output_dir": temp_dir,
@@ -120,6 +126,7 @@ class TestMultiGPULlama:
                 "optimizer": "adamw_8bit",
                 "lr_scheduler": "cosine",
                 "flash_attention": True,
+                "use_tensorboard": True,
             }
         )
 
@@ -142,6 +149,10 @@ class TestMultiGPULlama:
             ]
         )
 
+        check_tensorboard(
+            temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
+        )
+
     def test_dpo_lora_ddp(self, temp_dir):
         # pylint: disable=duplicate-code
         cfg = DictDefault(
@@ -180,7 +191,7 @@ class TestMultiGPULlama:
                     },
                 ],
                 "num_epochs": 1,
-                "max_steps": 15,
+                "max_steps": 5,
                 "micro_batch_size": 4,
                 "gradient_accumulation_steps": 4,
                 "output_dir": temp_dir,
@@ -189,6 +200,7 @@ class TestMultiGPULlama:
                 "optimizer": "adamw_8bit",
                 "lr_scheduler": "cosine",
                 "flash_attention": True,
+                "use_tensorboard": True,
             }
         )
 
@@ -211,6 +223,10 @@ class TestMultiGPULlama:
             ]
         )
 
+        check_tensorboard(
+            temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
+        )
+
     def test_dpo_qlora_ddp(self, temp_dir):
         # pylint: disable=duplicate-code
         cfg = DictDefault(
@@ -249,7 +265,7 @@ class TestMultiGPULlama:
                     },
                 ],
                 "num_epochs": 1,
-                "max_steps": 15,
+                "max_steps": 5,
                 "micro_batch_size": 4,
                 "gradient_accumulation_steps": 4,
                 "output_dir": temp_dir,
@@ -258,6 +274,7 @@ class TestMultiGPULlama:
                 "optimizer": "adamw_8bit",
                 "lr_scheduler": "cosine",
                 "flash_attention": True,
+                "use_tensorboard": True,
             }
         )
 
@@ -280,6 +297,10 @@ class TestMultiGPULlama:
             ]
         )
 
+        check_tensorboard(
+            temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
+        )
+
     @pytest.mark.parametrize(
         "gradient_accumulation_steps",
         [1, 4],
@@ -301,7 +322,7 @@ class TestMultiGPULlama:
                     },
                 ],
                 "num_epochs": 1,
-                "max_steps": 10,
+                "max_steps": 5,
                 "micro_batch_size": 4,
                 "gradient_accumulation_steps": gradient_accumulation_steps,
                 "output_dir": temp_dir,
@@ -323,6 +344,7 @@ class TestMultiGPULlama:
                     "fsdp_state_dict_type": "FULL_STATE_DICT",
                     "fsdp_auto_wrap_policy": "TRANSFORMER_BASED_WRAP",
                 },
+                "use_tensorboard": True,
             }
         )
 
@@ -345,6 +367,10 @@ class TestMultiGPULlama:
             ]
         )
 
+        check_tensorboard(
+            temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
+        )
+
     @pytest.mark.parametrize(
         "fsdp_state_dict_type",
         ["FULL_STATE_DICT", "SHARDED_STATE_DICT"],
@@ -368,7 +394,7 @@ class TestMultiGPULlama:
                     },
                 ],
                 "num_epochs": 1,
-                "max_steps": 15,
+                "max_steps": 5,
                 "micro_batch_size": 4,
                 "gradient_accumulation_steps": 4,
                 "output_dir": temp_dir,
@@ -390,6 +416,7 @@ class TestMultiGPULlama:
                     "fsdp_state_dict_type": fsdp_state_dict_type,
                     "fsdp_auto_wrap_policy": "TRANSFORMER_BASED_WRAP",
                 },
+                "use_tensorboard": True,
             }
         )
 
@@ -412,6 +439,10 @@ class TestMultiGPULlama:
             ]
         )
 
+        check_tensorboard(
+            temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
+        )
+
     def test_fsdp_qlora_prequant_packed(self, temp_dir):
         # pylint: disable=duplicate-code
         cfg = DictDefault(
@@ -444,7 +475,7 @@ class TestMultiGPULlama:
                     },
                 ],
                 "num_epochs": 1,
-                "max_steps": 15,
+                "max_steps": 5,
                 "micro_batch_size": 4,
                 "gradient_accumulation_steps": 4,
                 "output_dir": temp_dir,
@@ -466,6 +497,7 @@ class TestMultiGPULlama:
                     "fsdp_state_dict_type": "SHARDED_STATE_DICT",
                     "fsdp_auto_wrap_policy": "TRANSFORMER_BASED_WRAP",
                 },
+                "use_tensorboard": True,
             }
         )
 
@@ -488,12 +520,41 @@ class TestMultiGPULlama:
             ]
         )
 
+        check_tensorboard(
+            temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
+        )
+
     @pytest.mark.parametrize(
         "gradient_accumulation_steps",
         [1, 4],
     )
-    def test_ds_zero3_packed(self, temp_dir, gradient_accumulation_steps):
+    @pytest.mark.parametrize(
+        "deepspeed",
+        [
+            "deepspeed_configs/zero3_bf16.json",
+            "deepspeed_configs/zero3_bf16_cpuoffload_all.json",
+            "deepspeed_configs/zero3_bf16_cpuoffload_params.json",
+        ],
+    )
+    @pytest.mark.parametrize(
+        "qlora",
+        [True, False],
+    )
+    def test_ds_zero3_packed(
+        self, temp_dir, gradient_accumulation_steps, deepspeed, qlora
+    ):
         # pylint: disable=duplicate-code
+        if qlora:
+            adapter = {
+                "adapter": "qlora",
+                "lora_r": 8,
+                "lora_alpha": 16,
+                "lora_dropout": 0.05,
+                "lora_target_linear": True,
+                "load_in_4bit": True,
+            }
+        else:
+            adapter = {}
         cfg = DictDefault(
             {
                 "base_model": "HuggingFaceTB/SmolLM2-135M",
@@ -511,15 +572,17 @@ class TestMultiGPULlama:
                     },
                 ],
                 "num_epochs": 1,
-                "max_steps": 15,
-                "micro_batch_size": 4,
+                "max_steps": 5,
+                "micro_batch_size": 2,
                 "gradient_accumulation_steps": gradient_accumulation_steps,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
                 "optimizer": "adamw_torch",
                 "lr_scheduler": "cosine",
                 "flash_attention": True,
-                "deepspeed": str(AXOLOTL_ROOT / "deepspeed_configs/zero3_bf16.json"),
+                "deepspeed": str(AXOLOTL_ROOT / deepspeed),
+                "use_tensorboard": True,
+                **adapter,
             }
         )
 
@@ -542,19 +605,35 @@ class TestMultiGPULlama:
             ]
         )
 
-    def test_ds_zero3_qlora_packed(self, temp_dir):
+        check_tensorboard(
+            temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
+        )
+
+    @pytest.mark.parametrize(
+        "gradient_accumulation_steps",
+        [1, 4],
+    )
+    @pytest.mark.parametrize(
+        "qlora",
+        [True, False],
+    )
+    def test_ds_zero2_packed(self, temp_dir, gradient_accumulation_steps, qlora):
         # pylint: disable=duplicate-code
-        cfg = DictDefault(
-            {
-                "base_model": "HuggingFaceTB/SmolLM2-135M",
-                "load_in_4bit": True,
+        if qlora:
+            adapter = {
                 "adapter": "qlora",
                 "lora_r": 8,
                 "lora_alpha": 16,
                 "lora_dropout": 0.05,
                 "lora_target_linear": True,
+                "load_in_4bit": True,
+            }
+        else:
+            adapter = {}
+        cfg = DictDefault(
+            {
+                "base_model": "HuggingFaceTB/SmolLM2-135M",
                 "sample_packing": True,
-                "eval_sample_packing": False,
                 "pad_to_sequence_len": True,
                 "sequence_len": 2048,
                 "val_set_size": 0.05,
@@ -568,15 +647,17 @@ class TestMultiGPULlama:
                     },
                 ],
                 "num_epochs": 1,
-                "max_steps": 15,
-                "micro_batch_size": 4,
-                "gradient_accumulation_steps": 4,
+                "max_steps": 5,
+                "micro_batch_size": 2,
+                "gradient_accumulation_steps": gradient_accumulation_steps,
                 "output_dir": temp_dir,
-                "learning_rate": 0.0001,
+                "learning_rate": 0.00001,
                 "optimizer": "adamw_torch",
                 "lr_scheduler": "cosine",
                 "flash_attention": True,
-                "deepspeed": str(AXOLOTL_ROOT / "deepspeed_configs/zero3_bf16.json"),
+                "deepspeed": str(AXOLOTL_ROOT / "deepspeed_configs/zero2.json"),
+                "use_tensorboard": True,
+                **adapter,
             }
         )
 
@@ -598,3 +679,82 @@ class TestMultiGPULlama:
             str(Path(temp_dir) / "config.yaml"),
             ]
         )
+
+        check_tensorboard(
+            temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
+        )
+
+    @pytest.mark.parametrize(
+        "gradient_accumulation_steps",
+        [1, 4],
+    )
+    @pytest.mark.parametrize(
+        "qlora",
+        [True, False],
+    )
+    def test_ds_zero1_packed(self, temp_dir, gradient_accumulation_steps, qlora):
+        # pylint: disable=duplicate-code
+        if qlora:
+            adapter = {
+                "adapter": "qlora",
+                "lora_r": 8,
+                "lora_alpha": 16,
+                "lora_dropout": 0.05,
+                "lora_target_linear": True,
+                "load_in_4bit": True,
+            }
+        else:
+            adapter = {}
+        cfg = DictDefault(
+            {
+                "base_model": "HuggingFaceTB/SmolLM2-135M",
+                "sample_packing": True,
+                "pad_to_sequence_len": True,
+                "sequence_len": 2048,
+                "val_set_size": 0.05,
+                "special_tokens": {
+                    "pad_token": "<|endoftext|>",
+                },
+                "datasets": [
+                    {
+                        "path": "tatsu-lab/alpaca",
+                        "type": "alpaca",
+                    },
+                ],
+                "num_epochs": 1,
+                "max_steps": 5,
+                "micro_batch_size": 2,
+                "gradient_accumulation_steps": gradient_accumulation_steps,
+                "output_dir": temp_dir,
+                "learning_rate": 0.00001,
+                "optimizer": "adamw_torch",
+                "lr_scheduler": "cosine",
+                "flash_attention": True,
+                "deepspeed": str(AXOLOTL_ROOT / "deepspeed_configs/zero1.json"),
+                "use_tensorboard": True,
+                **adapter,
+            }
+        )
+
+        # write cfg to yaml file
+        Path(temp_dir).mkdir(parents=True, exist_ok=True)
+        with open(Path(temp_dir) / "config.yaml", "w", encoding="utf-8") as fout:
+            fout.write(yaml.dump(cfg.to_dict(), Dumper=yaml.Dumper))
+
+        execute_subprocess_async(
+            [
+                "accelerate",
+                "launch",
+                "--num-processes",
+                "2",
+                "--main_process_port",
+                f"{get_torch_dist_unique_port()}",
+                "-m",
+                "axolotl.cli.train",
+                str(Path(temp_dir) / "config.yaml"),
+            ]
+        )
+
+        check_tensorboard(
+            temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
+        )