From ac471a697a99a77fd92c2b3a2228a838e2a77310 Mon Sep 17 00:00:00 2001
From: salman
Date: Thu, 30 Jan 2025 16:45:56 +0000
Subject: [PATCH] updating to fused (#2293)

---
 examples/cerebras/btlm-ft.yml                    |  2 +-
 examples/deepseek-v2/fft-fsdp-16b.yaml           |  2 +-
 examples/deepseek-v2/qlora-fsdp-2_5.yaml         |  2 +-
 examples/jamba/qlora_fsdp_large.yaml             |  2 +-
 examples/llama-2/gptq-lora.yml                   |  2 +-
 examples/llama-2/qlora-fsdp.yml                  |  2 +-
 examples/llama-3/fft-8b-liger-fsdp.yaml          |  2 +-
 examples/llama-3/qlora-fsdp-405b.yaml            |  2 +-
 examples/llama-3/qlora-fsdp-70b.yaml             |  2 +-
 examples/mistral/lora-mps.yml                    |  2 +-
 examples/mistral/mixtral-8x22b-qlora-fsdp.yml    |  2 +-
 examples/mistral/mixtral-qlora-fsdp.yml          |  2 +-
 examples/phi/phi-ft.yml                          |  2 +-
 examples/phi/phi-qlora.yml                       |  2 +-
 examples/phi/phi2-ft.yml                         |  2 +-
 examples/phi/phi3-ft-fsdp.yml                    |  2 +-
 examples/phi/phi3-ft.yml                         |  2 +-
 examples/qwen2/qlora-fsdp.yaml                   |  2 +-
 examples/tiny-llama/lora-mps.yml                 |  2 +-
 tests/core/test_trainer_builder.py               |  2 +-
 tests/e2e/integrations/test_cut_cross_entropy.py |  2 +-
 tests/e2e/integrations/test_liger.py             |  4 ++--
 tests/e2e/multigpu/test_llama.py                 | 12 ++++++------
 tests/e2e/multigpu/test_qwen2.py                 |  2 +-
 tests/e2e/patched/test_4d_multipack_llama.py     |  4 ++--
 tests/e2e/patched/test_fused_llama.py            |  2 +-
 tests/e2e/patched/test_llama_s2_attention.py     |  4 ++--
 tests/e2e/patched/test_lora_llama_multipack.py   |  4 ++--
 tests/e2e/patched/test_mistral_samplepack.py     |  4 ++--
 tests/e2e/test_embeddings_lr.py                  |  4 ++--
 tests/e2e/test_falcon.py                         |  6 +++---
 tests/e2e/test_llama_pretrain.py                 |  2 +-
 tests/e2e/test_load_model.py                     |  2 +-
 tests/e2e/test_lora_llama.py                     |  2 +-
 tests/e2e/test_mamba.py                          |  2 +-
 tests/e2e/test_mistral.py                        |  4 ++--
 tests/e2e/test_packing_loss.py                   |  2 +-
 37 files changed, 51 insertions(+), 51 deletions(-)

diff --git a/examples/cerebras/btlm-ft.yml b/examples/cerebras/btlm-ft.yml
index 780616e04..44be53996 100644
--- a/examples/cerebras/btlm-ft.yml
+++ b/examples/cerebras/btlm-ft.yml
@@ -46,7 +46,7 @@ output_dir: ./outputs/btlm-out
 gradient_accumulation_steps: 1
 micro_batch_size: 1
 num_epochs: 1
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 adam_beta2: 0.95
 adam_eps: 0.000000001
 max_grad_norm: 1.0
diff --git a/examples/deepseek-v2/fft-fsdp-16b.yaml b/examples/deepseek-v2/fft-fsdp-16b.yaml
index 3d4608a01..649317494 100644
--- a/examples/deepseek-v2/fft-fsdp-16b.yaml
+++ b/examples/deepseek-v2/fft-fsdp-16b.yaml
@@ -27,7 +27,7 @@ wandb_log_model:
 gradient_accumulation_steps: 8
 micro_batch_size: 1
 num_epochs: 1
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 lr_scheduler: cosine
 learning_rate: 2e-5
 
diff --git a/examples/deepseek-v2/qlora-fsdp-2_5.yaml b/examples/deepseek-v2/qlora-fsdp-2_5.yaml
index a89dc343a..009e46a2f 100644
--- a/examples/deepseek-v2/qlora-fsdp-2_5.yaml
+++ b/examples/deepseek-v2/qlora-fsdp-2_5.yaml
@@ -47,7 +47,7 @@ peft_use_rslora: true
 gradient_accumulation_steps: 1
 micro_batch_size: 8
 num_epochs: 1
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 lr_scheduler: cosine
 learning_rate: 2e-5
 
diff --git a/examples/jamba/qlora_fsdp_large.yaml b/examples/jamba/qlora_fsdp_large.yaml
index 8736680da..594847330 100644
--- a/examples/jamba/qlora_fsdp_large.yaml
+++ b/examples/jamba/qlora_fsdp_large.yaml
@@ -34,7 +34,7 @@ lora_target_linear: false
 gradient_accumulation_steps: 4
 micro_batch_size: 1
 num_epochs: 2
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 lr_scheduler: cosine
 learning_rate: 0.00001
 
diff --git a/examples/llama-2/gptq-lora.yml b/examples/llama-2/gptq-lora.yml
index 7e45f7d63..7d6b90ee3 100644
--- a/examples/llama-2/gptq-lora.yml
+++ b/examples/llama-2/gptq-lora.yml
@@ -42,7 +42,7 @@ output_dir: ./outputs/model-out
 gradient_accumulation_steps: 1
 micro_batch_size: 1
 num_epochs: 4
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 adam_beta2: 0.95
 adam_eps: 0.00001
 max_grad_norm: 1.0
diff --git a/examples/llama-2/qlora-fsdp.yml b/examples/llama-2/qlora-fsdp.yml
index 204c91693..c2db26b81 100644
--- a/examples/llama-2/qlora-fsdp.yml
+++ b/examples/llama-2/qlora-fsdp.yml
@@ -39,7 +39,7 @@ wandb_log_model:
 gradient_accumulation_steps: 4
 micro_batch_size: 4
 num_epochs: 4
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 lr_scheduler: cosine
 learning_rate: 0.00001
 
diff --git a/examples/llama-3/fft-8b-liger-fsdp.yaml b/examples/llama-3/fft-8b-liger-fsdp.yaml
index 2c8589b17..aa8a7c5e8 100644
--- a/examples/llama-3/fft-8b-liger-fsdp.yaml
+++ b/examples/llama-3/fft-8b-liger-fsdp.yaml
@@ -37,7 +37,7 @@ wandb_log_model:
 gradient_accumulation_steps: 4
 micro_batch_size: 2
 num_epochs: 1
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 lr_scheduler: cosine
 learning_rate: 2e-5
 
diff --git a/examples/llama-3/qlora-fsdp-405b.yaml b/examples/llama-3/qlora-fsdp-405b.yaml
index a60a97ef3..434ee67c4 100644
--- a/examples/llama-3/qlora-fsdp-405b.yaml
+++ b/examples/llama-3/qlora-fsdp-405b.yaml
@@ -30,7 +30,7 @@ lora_target_linear: true
 gradient_accumulation_steps: 4
 micro_batch_size: 1
 num_epochs: 2
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 lr_scheduler: cosine
 learning_rate: 0.00001
 
diff --git a/examples/llama-3/qlora-fsdp-70b.yaml b/examples/llama-3/qlora-fsdp-70b.yaml
index 932e1a0d6..ceb2d8567 100644
--- a/examples/llama-3/qlora-fsdp-70b.yaml
+++ b/examples/llama-3/qlora-fsdp-70b.yaml
@@ -39,7 +39,7 @@ wandb_log_model:
 gradient_accumulation_steps: 4
 micro_batch_size: 1
 num_epochs: 4
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 lr_scheduler: cosine
 learning_rate: 0.00001
 
diff --git a/examples/mistral/lora-mps.yml b/examples/mistral/lora-mps.yml
index c1df9896c..a62990e56 100644
--- a/examples/mistral/lora-mps.yml
+++ b/examples/mistral/lora-mps.yml
@@ -47,7 +47,7 @@ wandb_log_model:
 gradient_accumulation_steps: 8
 micro_batch_size: 1
 num_epochs: 2
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 lr_scheduler: cosine
 learning_rate: 0.0002
 
diff --git a/examples/mistral/mixtral-8x22b-qlora-fsdp.yml b/examples/mistral/mixtral-8x22b-qlora-fsdp.yml
index 4a65b1a7d..353c08d85 100644
--- a/examples/mistral/mixtral-8x22b-qlora-fsdp.yml
+++ b/examples/mistral/mixtral-8x22b-qlora-fsdp.yml
@@ -41,7 +41,7 @@ wandb_log_model:
 gradient_accumulation_steps: 4
 micro_batch_size: 2
 num_epochs: 1
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 lr_scheduler: cosine
 learning_rate: 0.0002
 
diff --git a/examples/mistral/mixtral-qlora-fsdp.yml b/examples/mistral/mixtral-qlora-fsdp.yml
index fbd9bd937..f9b5ab606 100644
--- a/examples/mistral/mixtral-qlora-fsdp.yml
+++ b/examples/mistral/mixtral-qlora-fsdp.yml
@@ -43,7 +43,7 @@ wandb_log_model:
 gradient_accumulation_steps: 4
 micro_batch_size: 2
 num_epochs: 1
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 lr_scheduler: cosine
 learning_rate: 0.0002
 
diff --git a/examples/phi/phi-ft.yml b/examples/phi/phi-ft.yml
index fc5848dc5..29fad3094 100644
--- a/examples/phi/phi-ft.yml
+++ b/examples/phi/phi-ft.yml
@@ -38,7 +38,7 @@ wandb_log_model:
 gradient_accumulation_steps: 1
 micro_batch_size: 2
 num_epochs: 4
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 adam_beta2: 0.95
 adam_epsilon: 0.00001
 max_grad_norm: 1.0
diff --git a/examples/phi/phi-qlora.yml b/examples/phi/phi-qlora.yml
index a98cd1040..d9f23ff26 100644
--- a/examples/phi/phi-qlora.yml
+++ b/examples/phi/phi-qlora.yml
@@ -38,7 +38,7 @@ wandb_log_model:
 gradient_accumulation_steps: 1
 micro_batch_size: 2
 num_epochs: 4
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 adam_beta2: 0.95
 adam_epsilon: 0.00001
 max_grad_norm: 1.0
diff --git a/examples/phi/phi2-ft.yml b/examples/phi/phi2-ft.yml
index 0f656f821..1b7ac89ec 100644
--- a/examples/phi/phi2-ft.yml
+++ b/examples/phi/phi2-ft.yml
@@ -38,7 +38,7 @@ wandb_log_model:
 gradient_accumulation_steps: 1
 micro_batch_size: 2
 num_epochs: 4
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 adam_beta2: 0.95
 adam_epsilon: 0.00001
 max_grad_norm: 1.0
diff --git a/examples/phi/phi3-ft-fsdp.yml b/examples/phi/phi3-ft-fsdp.yml
index c081e47b9..1479bb97f 100644
--- a/examples/phi/phi3-ft-fsdp.yml
+++ b/examples/phi/phi3-ft-fsdp.yml
@@ -39,7 +39,7 @@ wandb_log_model:
 gradient_accumulation_steps: 2
 micro_batch_size: 12
 num_epochs: 2
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 adam_beta2: 0.95
 adam_epsilon: 0.00001
 max_grad_norm: 1.0
diff --git a/examples/phi/phi3-ft.yml b/examples/phi/phi3-ft.yml
index ac42153af..58afd940e 100644
--- a/examples/phi/phi3-ft.yml
+++ b/examples/phi/phi3-ft.yml
@@ -35,7 +35,7 @@ lora_fan_in_fan_out:
 gradient_accumulation_steps: 1
 micro_batch_size: 2
 num_epochs: 1
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 adam_beta2: 0.95
 adam_epsilon: 0.00001
 max_grad_norm: 1.0
diff --git a/examples/qwen2/qlora-fsdp.yaml b/examples/qwen2/qlora-fsdp.yaml
index cc4974908..c537d3244 100644
--- a/examples/qwen2/qlora-fsdp.yaml
+++ b/examples/qwen2/qlora-fsdp.yaml
@@ -37,7 +37,7 @@ wandb_log_model:
 gradient_accumulation_steps: 4
 micro_batch_size: 1
 num_epochs: 4
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 lr_scheduler: cosine
 learning_rate: 0.0002
 
diff --git a/examples/tiny-llama/lora-mps.yml b/examples/tiny-llama/lora-mps.yml
index f949acd0f..c777a4d7b 100644
--- a/examples/tiny-llama/lora-mps.yml
+++ b/examples/tiny-llama/lora-mps.yml
@@ -38,7 +38,7 @@ wandb_log_model:
 gradient_accumulation_steps: 4
 micro_batch_size: 2
 num_epochs: 4
-optimizer: adamw_torch
+optimizer: adamw_torch_fused
 lr_scheduler: cosine
 learning_rate: 0.0002
 
diff --git a/tests/core/test_trainer_builder.py b/tests/core/test_trainer_builder.py
index 558d3cb95..fbfd7a87c 100644
--- a/tests/core/test_trainer_builder.py
+++ b/tests/core/test_trainer_builder.py
@@ -22,7 +22,7 @@ def fixture_cfg():
             "output_dir": "./model-out",
             "warmup_steps": 10,
             "gradient_checkpointing": False,
-            "optimizer": "adamw_torch",
+            "optimizer": "adamw_torch_fused",
             "sequence_len": 2048,
             "rl": True,
             "adam_beta1": 0.998,
diff --git a/tests/e2e/integrations/test_cut_cross_entropy.py b/tests/e2e/integrations/test_cut_cross_entropy.py
index 291a4a4ec..f65d65ee4 100644
--- a/tests/e2e/integrations/test_cut_cross_entropy.py
+++ b/tests/e2e/integrations/test_cut_cross_entropy.py
@@ -39,7 +39,7 @@ def min_cfg(temp_dir):
             "micro_batch_size": 8,
             "gradient_accumulation_steps": 1,
             "learning_rate": 0.00001,
-            "optimizer": "adamw_torch",
+            "optimizer": "adamw_torch_fused",
             "output_dir": temp_dir,
             "lr_scheduler": "cosine",
             "save_safetensors": True,
diff --git a/tests/e2e/integrations/test_liger.py b/tests/e2e/integrations/test_liger.py
index 1efe889e4..cf673cab2 100644
--- a/tests/e2e/integrations/test_liger.py
+++ b/tests/e2e/integrations/test_liger.py
@@ -48,7 +48,7 @@ class LigerIntegrationTestCase:
                 "gradient_accumulation_steps": 2,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "save_safetensors": True,
                 "bf16": "auto",
@@ -93,7 +93,7 @@ class LigerIntegrationTestCase:
                 "gradient_accumulation_steps": 2,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "save_safetensors": True,
                 "bf16": "auto",
diff --git a/tests/e2e/multigpu/test_llama.py b/tests/e2e/multigpu/test_llama.py
index bb1874b0b..0f91fe056 100644
--- a/tests/e2e/multigpu/test_llama.py
+++ b/tests/e2e/multigpu/test_llama.py
@@ -331,7 +331,7 @@ class TestMultiGPULlama:
                 "gradient_accumulation_steps": gradient_accumulation_steps,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "flash_attention": True,
                 "fsdp": [
@@ -401,7 +401,7 @@ class TestMultiGPULlama:
                 "gradient_accumulation_steps": 4,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "flash_attention": True,
                 "fsdp": [
@@ -480,7 +480,7 @@ class TestMultiGPULlama:
                 "gradient_accumulation_steps": 4,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "flash_attention": True,
                 "fsdp": [
@@ -575,7 +575,7 @@ class TestMultiGPULlama:
                 "gradient_accumulation_steps": gradient_accumulation_steps,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "flash_attention": True,
                 "deepspeed": str(AXOLOTL_ROOT / deepspeed),
@@ -648,7 +648,7 @@ class TestMultiGPULlama:
                 "gradient_accumulation_steps": gradient_accumulation_steps,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "flash_attention": True,
                 "deepspeed": str(AXOLOTL_ROOT / "deepspeed_configs/zero2.json"),
@@ -721,7 +721,7 @@ class TestMultiGPULlama:
                 "gradient_accumulation_steps": gradient_accumulation_steps,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "flash_attention": True,
                 "deepspeed": str(AXOLOTL_ROOT / "deepspeed_configs/zero1.json"),
diff --git a/tests/e2e/multigpu/test_qwen2.py b/tests/e2e/multigpu/test_qwen2.py
index 2b9884848..1895e1ee8 100644
--- a/tests/e2e/multigpu/test_qwen2.py
+++ b/tests/e2e/multigpu/test_qwen2.py
@@ -52,7 +52,7 @@ class TestMultiGPUQwen2:
                 "gradient_accumulation_steps": 2,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "flash_attention": True,
                 "bf16": "auto",
diff --git a/tests/e2e/patched/test_4d_multipack_llama.py b/tests/e2e/patched/test_4d_multipack_llama.py
index af8eb3742..7beb71145 100644
--- a/tests/e2e/patched/test_4d_multipack_llama.py
+++ b/tests/e2e/patched/test_4d_multipack_llama.py
@@ -52,7 +52,7 @@ class Test4dMultipackLlama(unittest.TestCase):
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 20,
                 "save_steps": 10,
@@ -96,7 +96,7 @@ class Test4dMultipackLlama(unittest.TestCase):
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 20,
                 "save_steps": 10,
diff --git a/tests/e2e/patched/test_fused_llama.py b/tests/e2e/patched/test_fused_llama.py
index e7ab510c9..f8f245514 100644
--- a/tests/e2e/patched/test_fused_llama.py
+++ b/tests/e2e/patched/test_fused_llama.py
@@ -56,7 +56,7 @@ class TestFusedLlama(unittest.TestCase):
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 10,
                 "save_steps": 5,
diff --git a/tests/e2e/patched/test_llama_s2_attention.py b/tests/e2e/patched/test_llama_s2_attention.py
index 8d0ba6c2a..cfa70fd73 100644
--- a/tests/e2e/patched/test_llama_s2_attention.py
+++ b/tests/e2e/patched/test_llama_s2_attention.py
@@ -56,7 +56,7 @@ class TestLlamaShiftedSparseAttention(unittest.TestCase):
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 10,
                 "save_steps": 5,
@@ -96,7 +96,7 @@ class TestLlamaShiftedSparseAttention(unittest.TestCase):
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 10,
                 "save_steps": 5,
diff --git a/tests/e2e/patched/test_lora_llama_multipack.py b/tests/e2e/patched/test_lora_llama_multipack.py
index bc18e3d81..e544eb4fd 100644
--- a/tests/e2e/patched/test_lora_llama_multipack.py
+++ b/tests/e2e/patched/test_lora_llama_multipack.py
@@ -61,7 +61,7 @@ class TestLoraLlama(unittest.TestCase):
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
             }
         )
@@ -116,7 +116,7 @@ class TestLoraLlama(unittest.TestCase):
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
             }
         )
diff --git a/tests/e2e/patched/test_mistral_samplepack.py b/tests/e2e/patched/test_mistral_samplepack.py
index c7fd0ecbc..f9e523679 100644
--- a/tests/e2e/patched/test_mistral_samplepack.py
+++ b/tests/e2e/patched/test_mistral_samplepack.py
@@ -55,7 +55,7 @@ class TestMistral(unittest.TestCase):
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 20,
                 "save_steps": 10,
@@ -96,7 +96,7 @@ class TestMistral(unittest.TestCase):
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 20,
                 "save_steps": 10,
diff --git a/tests/e2e/test_embeddings_lr.py b/tests/e2e/test_embeddings_lr.py
index 4261ccc26..a1e9b5d46 100644
--- a/tests/e2e/test_embeddings_lr.py
+++ b/tests/e2e/test_embeddings_lr.py
@@ -48,7 +48,7 @@ class TestEmbeddingsLrScale(unittest.TestCase):
                 "val_set_size": 0.0,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "embedding_lr_scale": 0.5,
                 "lr_scheduler": "cosine",
                 "save_safetensors": True,
@@ -92,7 +92,7 @@ class TestEmbeddingsLrScale(unittest.TestCase):
                 "val_set_size": 0.0,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "embedding_lr": 0.000005,
                 "lr_scheduler": "cosine",
                 "save_safetensors": True,
diff --git a/tests/e2e/test_falcon.py b/tests/e2e/test_falcon.py
index ddcb66275..738a0e0b0 100644
--- a/tests/e2e/test_falcon.py
+++ b/tests/e2e/test_falcon.py
@@ -57,7 +57,7 @@ class TestFalcon(unittest.TestCase):
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 20,
                 "save_steps": 10,
@@ -110,7 +110,7 @@ class TestFalcon(unittest.TestCase):
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 20,
                 "save_steps": 10,
@@ -149,7 +149,7 @@ class TestFalcon(unittest.TestCase):
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 20,
                 "save_steps": 10,
diff --git a/tests/e2e/test_llama_pretrain.py b/tests/e2e/test_llama_pretrain.py
index c1f024b87..5cd1c693a 100644
--- a/tests/e2e/test_llama_pretrain.py
+++ b/tests/e2e/test_llama_pretrain.py
@@ -62,7 +62,7 @@ class TestPretrainLlama:
                 "val_set_size": 0.0,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "save_safetensors": True,
                 "bf16": "auto",
diff --git a/tests/e2e/test_load_model.py b/tests/e2e/test_load_model.py
index 31a9b1a87..255b096b0 100644
--- a/tests/e2e/test_load_model.py
+++ b/tests/e2e/test_load_model.py
@@ -52,7 +52,7 @@ class TestLoadModelUtils:
                 "micro_batch_size": 8,
                 "gradient_accumulation_steps": 1,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
             }
        )
diff --git a/tests/e2e/test_lora_llama.py b/tests/e2e/test_lora_llama.py
index 696c47aed..8ebedf276 100644
--- a/tests/e2e/test_lora_llama.py
+++ b/tests/e2e/test_lora_llama.py
@@ -54,7 +54,7 @@ class TestLoraLlama(unittest.TestCase):
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 20,
             }
diff --git a/tests/e2e/test_mamba.py b/tests/e2e/test_mamba.py
index 4b4db3058..8cadf4932 100644
--- a/tests/e2e/test_mamba.py
+++ b/tests/e2e/test_mamba.py
@@ -51,7 +51,7 @@ class TestMamba(unittest.TestCase):
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 20,
                 "save_steps": 10,
diff --git a/tests/e2e/test_mistral.py b/tests/e2e/test_mistral.py
index a304e9b4a..17732d879 100644
--- a/tests/e2e/test_mistral.py
+++ b/tests/e2e/test_mistral.py
@@ -56,7 +56,7 @@ class TestMistral(unittest.TestCase):
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 20,
                 "save_steps": 10,
@@ -95,7 +95,7 @@ class TestMistral(unittest.TestCase):
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 20,
                 "save_steps": 10,
diff --git a/tests/e2e/test_packing_loss.py b/tests/e2e/test_packing_loss.py
index 13244a215..a363b11b2 100644
--- a/tests/e2e/test_packing_loss.py
+++ b/tests/e2e/test_packing_loss.py
@@ -49,7 +49,7 @@ class TestPackedLlama(unittest.TestCase):
                 "gradient_accumulation_steps": 4,
                 "output_dir": temp_dir,
                 "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 5,
                 "use_tensorboard": True,
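
Note on what the one-line swap buys: axolotl forwards the `optimizer` value to
Hugging Face's `TrainingArguments.optim`, where `adamw_torch` resolves to stock
`torch.optim.AdamW` and `adamw_torch_fused` resolves to the same optimizer
constructed with `fused=True`, which runs the parameter update as fused
multi-tensor kernels instead of a per-tensor loop. A minimal sketch of the
distinction, in plain PyTorch rather than axolotl code, with a placeholder
model and learning rate:

    import torch

    # Placeholder model and lr for illustration; real values come from the
    # YAML configs changed in this patch.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = torch.nn.Linear(16, 4).to(device)

    # optimizer: adamw_torch        -> stock AdamW, per-tensor update loop
    unfused = torch.optim.AdamW(model.parameters(), lr=1e-5)

    # optimizer: adamw_torch_fused  -> same update rule via fused kernels;
    # fused=True is only guaranteed on CUDA, so gate it here to stay runnable
    fused = torch.optim.AdamW(model.parameters(), lr=1e-5,
                              fused=(device == "cuda"))

Because the fused path computes the same AdamW update, the swap is a drop-in
change: every hunk above touches only the optimizer name, no hyperparameters.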