diff --git a/.nojekyll b/.nojekyll
index 7d718cef5..ed2b46d64 100644
--- a/.nojekyll
+++ b/.nojekyll
@@ -1 +1 @@
-b9faa1e7
\ No newline at end of file
+be620dc5
\ No newline at end of file

[Identical sidebar-navigation hunks omitted here: FAQS.html, TODO.html, docs/amd_hpc.html, docs/batch_vs_grad.html, docs/cli.html, and every generated page under docs/api/ receive the same six-line addition immediately after the "LoRA Optimizations" navigation entry (hunks at @@ -292,6 +292,12 @@, @@ -326,6 +326,12 @@, or @@ -355,6 +355,12 @@ depending on the page). The inserted markup was stripped when this diff was extracted and cannot be reconstructed, so only the affected files and hunk locations are noted.]
diff --git a/docs/config.html b/docs/config.html
index 4cb23105e..a291f504a 100644
--- a/docs/config.html
+++ b/docs/config.html
@@ -327,6 +327,12 @@
[Same sidebar-navigation addition as above; the inserted markup was stripped during extraction.]
@@ -569,7 +575,7 @@
   preprocess_shards: # Optional[int] process dataset in N sequential chunks for memory efficiency (exclusive with `shards`)
   name: # Optional[str] name of dataset configuration to load
-  train_on_split: train # Optional[str] name of dataset split to load from
+  split: train # Optional[str] name of dataset split to load from
   revision: # Optional[str] The specific revision of the dataset to use when loading from the Hugging Face Hub. This can be a commit hash, tag, or branch name. If not specified, the latest version will be used. This parameter is ignored for local datasets.
   trust_remote_code: # Optional[bool] Trust remote code for untrusted source
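[For context on the renamed key above, a minimal `datasets` entry using `split` could look like the sketch below; the file path and the `type` value are illustrative assumptions, not taken from this diff. The keys mirror the `test_datasets` example documented later in this file.]

datasets:
  - path: /workspace/data/train.jsonl   # illustrative local dataset
    ds_type: json
    type: completion
    split: train                        # renamed from the former `train_on_split`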
@@ -625,547 +631,549 @@
       content: value
   # ...

[The remainder of this hunk removes the documentation block below from its old position and re-adds it with only one textual change, the expanded comment on the `roles` mapping. The duplicated copy is shown once here: the changed lines carry -/+ markers, unchanged lines are shown as context.]

-  # Optional[Dict[str, List]]. Roles mapping in the messages. The default is:
+  # Optional[Dict[str, List]]. Roles mapping in the messages.
+  # The format is {target_role: [source_roles]}. All source roles will be mapped to the target role.
+  # The default is:
   roles:
     user: ["human", "user"]
     assistant: ["gpt", "assistant"]
     system: ["system"]
     tool: ["tool"]

   # Optional[bool]. Whether to drop the system turn from the dataset. Only works with chat_template.
   # This does not drop the default system message from chat_template if it exists. If you wish to,
   # we recommend using a custom jinja template with the default system message removed or
   # adding a system turn with empty content.
   drop_system_message:

   # IMPORTANT: The following fields determine which parts of the conversation to train on.
   # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train
   # See examples at `docs/dataset-formats/conversation.qmd`
   # Note: If the below 4 fields are set to empty, defaults to training only on the last message.

   # Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss.
   roles_to_train: ["assistant"] # default
   # Optional[str]. Which EOS tokens to train on in the conversation. Possible values are:
   # - all: train on all EOS tokens
   # - turn (default): train on the EOS token at the end of each trainable turn
   # - last: train on the last EOS token in the conversation
   # TIP: Please make sure that your `tokenizer.eos_token` is same as EOS/EOT token in template. Otherwise, set `eos_token` under `special_tokens`.
   train_on_eos: last
   # The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`.
   message_field_training: training
   # The key in the message turn that contains the training details. Useful to selectively train on certain tokens in a turn.
   # The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train).
   message_field_training_detail: train_detail
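[As a reading aid for the expanded `roles` documentation above, a dataset entry that remaps custom source role names and trains only on assistant turns could look like the sketch below; the dataset path, the extra role names, and the `type: chat_template` value are illustrative assumptions rather than content of this diff.]

datasets:
  - path: ./data/conversations.jsonl      # illustrative path
    type: chat_template                   # assumed dataset type for these options
    roles:                                # {target_role: [source_roles]}
      user: ["human", "user", "customer"]
      assistant: ["gpt", "assistant", "agent"]
    roles_to_train: ["assistant"]
    train_on_eos: turn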
 # If false, the datasets will not be shuffled and will keep their original order in `datasets`.
 # The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true.
 shuffle_merged_datasets: true

 Deduplicates datasets and test_datasets with identical entries.
 dataset_exact_deduplication: true

 # A list of one or more datasets to eval the model with.
 # You can use either test_datasets, or val_set_size, but not both.
 test_datasets:
   - path: /workspace/data/eval.jsonl
     ds_type: json
     # You need to specify a split. For "json" datasets the default split is called "train".
     split: train
     type: completion
     data_files:
       - /workspace/data/eval.jsonl
 # use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo'
 rl:
 rl_beta: # Optional[float]. The beta parameter for the RL training.

 # dpo
 dpo_use_weighting: # Optional[bool]. Whether to perform weighting.
 rpo_alpha: # Optional[float]. Weighting of NLL term in loss from RPO paper.

 # orpo
 orpo_alpha: 0.1 # Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to `beta` in `ORPOConfig` due to trl mapping.

 # kto
 kto_desirable_weight: # Optional[float]. Factor for desirable loss term in KTO loss.
 kto_undesirable_weight: # Optional[float]. Factor for undesirable loss term in KTO loss.

 # simpo
 cpo_alpha: 1.0 # Weight of the BC regularizer
 simpo_gamma: 0.5 # Target reward margin for the SimPO loss

 # grpo
 trl:
   use_vllm: # Optional[bool]. Whether to use VLLM for RL training.
   vllm_server_host: # Optional[str]. Host of the vLLM server to connect to.
   vllm_server_port: # Optional[int]. Port of the vLLM server to connect to.
   vllm_server_timeout: # Optional[int]. Total timeout (in seconds) to wait for the vLLM server to respond.
   vllm_guided_decoding_regex: # Optional[str]. Regex for vLLM guided decoding.

   beta: # Optional[float]. Beta parameter for the RL training. Same as `rl_beta`. Use
   max_completion_length: # Optional[int]. Maximum length of the completion for RL training.

   reward_funcs: # Optional[list[str]]. List of reward functions to load. Paths must be importable from current dir.
   reward_weights: # Optional[list[float]]. List of reward weights for the reward functions.

   num_generations: # Optional[int]. Number of generations to sample.
   log_completions: # Optional[bool]. Whether to log completions.

   sync_ref_model: # Optional[bool]. Whether to sync the reference model.
   ref_model_mixup_alpha: # Optional[float]. Mixup alpha for the reference model.
   ref_model_sync_steps: # Optional[int]. Sync steps for the reference model.


 # reward modelling: `True` or `False`
 reward_model:

 # process reward modelling: `True` or `False`
 process_reward_model:
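[To make the GRPO-related keys above concrete, a minimal sketch of a GRPO section might look as follows; the reward-function module path and the numeric values are illustrative assumptions, not recommendations from this diff.]

rl: grpo
trl:
  beta: 0.04                        # illustrative value
  num_generations: 4
  max_completion_length: 256
  reward_funcs:
    - my_rewards.format_reward      # hypothetical module.function, importable from the current dir
  reward_weights:
    - 1.0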
 # The name of the chat template to use for training, following values are supported:
 # - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default value.
 # - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py
 # - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not available in the tokenizer.
 # - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.
 # The selected chat template will be saved to the tokenizer_config.json for easier inferencing
 # Note: It is recommended to set train_on_inputs to true when using a chat template that is different from the model's default chat template.
 chat_template: tokenizer_default
 # custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null.
 chat_template_jinja: null
 # Changes the default system message. Currently only supports chatml.
 default_system_message: You are a helpful assistant. Please give a long and detailed answer.
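[For illustration, a configuration that prefers the tokenizer's own template but falls back to chatml, as described above, could be as short as the sketch below; the values are illustrative.]

chat_template: tokenizer_default_fallback_chatml
# per the note above, consider train_on_inputs when the template differs from the model's default
train_on_inputs: true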
+dataset_processes: # defaults to os.cpu_count() if not set +# Keep dataset in memory while preprocessing +# Only needed if cached dataset is taking too much storage +dataset_keep_in_memory: +# push checkpoints to hub +hub_model_id: # private repo path to push finetuned model +# how to push checkpoints to hub +# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy +hub_strategy: +# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets +# Required to be true when used in combination with `push_dataset_to_hub` +hf_use_auth_token: # boolean +# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval. +val_set_size: 0.04 +# Num shards for whole dataset +dataset_shard_num: +# Index of shard to use for whole dataset +dataset_shard_idx: + +# The maximum length of an input to train with, this should typically be less than 2048 +# as most models have a token/context limit of 2048 +sequence_len: 2048 +# Pad inputs so each step uses constant sized buffers +# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently +pad_to_sequence_len: +# Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true' +sample_packing: +# Set to 'false' if getting errors during eval with sample_packing on. +eval_sample_packing: +# You can set these packing optimizations AFTER starting a training at least once. +# The trainer will provide recommended values for these values. +sample_packing_eff_est: +total_num_tokens: +# Increasing the following values helps with packing, but usually only slightly (<%1.) +# The number of samples packed at a time. +sample_packing_group_size: 100000 +# The number of samples which can be packed into one sequence. Increase if using a large sequence_len with many short samples. +sample_packing_bin_size: 200 +sample_pack_sequentially: # Optional[bool]. Whether to pack samples sequentially. + +# whether to concatenate samples during pretraining +pretraining_sample_concatenation: -# Use batch flattening for speedups when not using sample_packing -batch_flattening: - -# Passed through to transformers when loading the model when launched without accelerate -# Use `sequential` when training w/ model parallelism to limit memory -device_map: -# Defines the max memory usage per gpu on the system. Passed through to transformers when loading the model. -max_memory: - -# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model -adapter: lora -# If you already have a lora model trained that you want to load, put that here. -# This means after training, if you want to test the model, you should set this to the value of `output_dir`. -# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`. -lora_model_dir: - -# LoRA hyperparameters -# For more details about the following options, see: -# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2 -lora_r: 8 -lora_alpha: 16 -lora_dropout: 0.05 -lora_target_modules: - - q_proj - - v_proj -# - k_proj -# - o_proj -# - gate_proj -# - down_proj -# - up_proj -lora_target_linear: # If true, will target all linear modules - -# List[int] | int. 
# The layer indices to transform, otherwise, apply to all layers -# https://huggingface.co/docs/peft/v0.15.0/en/package_reference/lora#peft.LoraConfig.layers_to_transform -peft_layers_to_transform: - -# Optional[bool]. Whether to use DoRA. -# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#weight-decomposed-low-rank-adaptation-dora -peft_use_dora: - -# Optional[bool]. Whether to use RSLoRA. -# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#rank-stabilized-lora -peft_use_rslora: - -# Optional[list[tuple[int, int]]]. List of layer indices to replicate. -# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#memory-efficient-layer-replication-with-lora -peft_layer_replication: - -# bool | Literal["gaussian", "eva", "olora", "pissa", "pissa_niter_[number of iters]", "corda", "loftq"] -# How to initialize LoRA weights. Default to True which is MS original implementation. -# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#initialization -peft_init_lora_weights: - -# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens. -# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models. -# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities. -# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994 -lora_modules_to_save: -# - embed_tokens -# - lm_head - -lora_fan_in_fan_out: false +curriculum_sampling: # Optional[bool]. Whether to use sequential sampling for curriculum learning + +# Use batch flattening for speedups when not using sample_packing +batch_flattening: + +# Passed through to transformers when loading the model when launched without accelerate +# Use `sequential` when training w/ model parallelism to limit memory +device_map: +# Defines the max memory usage per gpu on the system. Passed through to transformers when loading the model. +max_memory: + +# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model +adapter: lora +# If you already have a lora model trained that you want to load, put that here. +# This means after training, if you want to test the model, you should set this to the value of `output_dir`. +# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`. +lora_model_dir: + +# LoRA hyperparameters +# For more details about the following options, see: +# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2 +lora_r: 8 +lora_alpha: 16 +lora_dropout: 0.05 +lora_target_modules: + - q_proj + - v_proj +# - k_proj +# - o_proj +# - gate_proj +# - down_proj +# - up_proj +lora_target_linear: # If true, will target all linear modules + +# List[int] | int. # The layer indices to transform, otherwise, apply to all layers +# https://huggingface.co/docs/peft/v0.15.0/en/package_reference/lora#peft.LoraConfig.layers_to_transform +peft_layers_to_transform: + +# Optional[bool]. Whether to use DoRA. +# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#weight-decomposed-low-rank-adaptation-dora +peft_use_dora: + +# Optional[bool]. Whether to use RSLoRA. +# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#rank-stabilized-lora +peft_use_rslora: + +# Optional[list[tuple[int, int]]]. List of layer indices to replicate. 
+# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#memory-efficient-layer-replication-with-lora +peft_layer_replication: + +# bool | Literal["gaussian", "eva", "olora", "pissa", "pissa_niter_[number of iters]", "corda", "loftq"] +# How to initialize LoRA weights. Default to True which is MS original implementation. +# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#initialization +peft_init_lora_weights: + +# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens. +# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models. +# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities. +# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994 +lora_modules_to_save: +# - embed_tokens +# - lm_head -# Apply custom LoRA autograd functions and activation function Triton kernels for -# speed and memory savings -# See: https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html -lora_mlp_kernel: true -lora_qkv_kernel: true -lora_o_kernel: true - -# LoRA+ hyperparameters -# For more details about the following options, see: -# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/train_builder.py` -loraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4. -loraplus_lr_embedding: # loraplus learning rate for lora embedding layers. Default value is 1e-6. - -peft: - # Configuration options for loftq initialization for LoRA - # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization - loftq_config: - loftq_bits: # typically 4 bits - -# ReLoRA configuration -# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed -relora_steps: # Number of steps per ReLoRA restart -relora_warmup_steps: # Number of per-restart warmup steps -relora_anneal_steps: # Number of anneal steps for each relora cycle -relora_prune_ratio: # threshold for optimizer magnitude when pruning -relora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings - -# wandb configuration if you're using it -# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`. -wandb_mode: # "offline" to save run metadata locally and not sync to the server, "disabled" to turn off wandb -wandb_project: # Your wandb project name -wandb_entity: # A wandb Team name if using a Team -wandb_watch: -wandb_name: # Set the name of your wandb run -wandb_run_id: # Set the ID of your wandb run -wandb_log_model: # "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only at the end of training - -# mlflow configuration if you're using it -mlflow_tracking_uri: # URI to mlflow -mlflow_experiment_name: # Your experiment name -mlflow_run_name: # Your run name -hf_mlflow_log_artifacts: # set to true to copy each saved checkpoint on each save to mlflow artifact registry - -# Comet configuration if you're using it -# Make sure your `COMET_API_KEY` environment variable is set (recommended) or you login to Comet with `comet login`. -# Check out our documentation for more details https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start -use_comet: # Enable or disable Comet integration. -comet_api_key: # API key for Comet. Recommended to set via `comet login`. -comet_workspace: # Workspace name in Comet. 
Defaults to the user's default workspace. -comet_project_name: # Project name in Comet. Defaults to Uncategorized. -comet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key. -comet_mode: # Create a new experiment ("create") or log to an existing one ("get"). Default ("get_or_create") auto-selects based on configuration. -comet_online: # Set to True to log data to Comet server, or False for offline storage. Default is True. -comet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details. - -# Tensorboard -use_tensorboard: # Optional[bool] - -# Where to save the full-finetuned model to -output_dir: ./completed-model - -# Whether to use torch.compile and which backend to use -# setting to `auto` will enable torch compile when torch>=2.5.1 -torch_compile: # Optional[Union[Literal["auto"], bool]] -torch_compile_backend: # Optional[str] - -# Training hyperparameters +lora_fan_in_fan_out: false + +# Apply custom LoRA autograd functions and activation function Triton kernels for +# speed and memory savings +# See: https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html +lora_mlp_kernel: true +lora_qkv_kernel: true +lora_o_kernel: true + +# LoRA+ hyperparameters +# For more details about the following options, see: +# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/train_builder.py` +loraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4. +loraplus_lr_embedding: # loraplus learning rate for lora embedding layers. Default value is 1e-6. + +peft: + # Configuration options for loftq initialization for LoRA + # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization + loftq_config: + loftq_bits: # typically 4 bits + +# ReLoRA configuration +# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed +relora_steps: # Number of steps per ReLoRA restart +relora_warmup_steps: # Number of per-restart warmup steps +relora_anneal_steps: # Number of anneal steps for each relora cycle +relora_prune_ratio: # threshold for optimizer magnitude when pruning +relora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings + +# wandb configuration if you're using it +# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`. +wandb_mode: # "offline" to save run metadata locally and not sync to the server, "disabled" to turn off wandb +wandb_project: # Your wandb project name +wandb_entity: # A wandb Team name if using a Team +wandb_watch: +wandb_name: # Set the name of your wandb run +wandb_run_id: # Set the ID of your wandb run +wandb_log_model: # "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only at the end of training + +# mlflow configuration if you're using it +mlflow_tracking_uri: # URI to mlflow +mlflow_experiment_name: # Your experiment name +mlflow_run_name: # Your run name +hf_mlflow_log_artifacts: # set to true to copy each saved checkpoint on each save to mlflow artifact registry + +# Comet configuration if you're using it +# Make sure your `COMET_API_KEY` environment variable is set (recommended) or you login to Comet with `comet login`. 
+# Check out our documentation for more details https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start +use_comet: # Enable or disable Comet integration. +comet_api_key: # API key for Comet. Recommended to set via `comet login`. +comet_workspace: # Workspace name in Comet. Defaults to the user's default workspace. +comet_project_name: # Project name in Comet. Defaults to Uncategorized. +comet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key. +comet_mode: # Create a new experiment ("create") or log to an existing one ("get"). Default ("get_or_create") auto-selects based on configuration. +comet_online: # Set to True to log data to Comet server, or False for offline storage. Default is True. +comet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details. + +# Tensorboard +use_tensorboard: # Optional[bool] + +# Where to save the full-finetuned model to +output_dir: ./completed-model + +# Whether to use torch.compile and which backend to use +# setting to `auto` will enable torch compile when torch>=2.5.1 +torch_compile: # Optional[Union[Literal["auto"], bool]] +torch_compile_backend: # Optional[str] -# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps. -gradient_accumulation_steps: 1 -# The number of samples to include in each batch. This is the number of samples sent to each GPU. -# Batch size per gpu = micro_batch_size * gradient_accumulation_steps -micro_batch_size: 2 -eval_batch_size: -num_epochs: 4 -warmup_steps: 100 # cannot use with warmup_ratio -warmup_ratio: 0.05 # cannot use with warmup_steps -learning_rate: 0.00003 -lr_quadratic_warmup: -logging_steps: -eval_steps: # Leave empty to eval at each epoch, integer for every N steps. float for fraction of total steps -evals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps -eval_strategy: # Set to `"no"` to skip evaluation, `"epoch"` at end of each epoch, leave empty to infer from `eval_steps`. -save_strategy: # Set to `"no"` to skip checkpoint saves, `"epoch"` at end of each epoch, `"best"` when better result is achieved, leave empty to infer from `save_steps`. -save_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps -saves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps -save_total_limit: # Checkpoints saved at a time -# Maximum number of iterations to train for. It precedes num_epochs which means that -# if both are set, num_epochs will not be guaranteed. -# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps -max_steps: - -# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time. -include_tokens_per_second: # Optional[bool] - -# whether to find batch size that fits in memory. Passed to underlying transformers Trainer -auto_find_batch_size: # Optional[bool] - -eval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0 -eval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128 -do_causal_lm_eval: # Whether to run causal language model evaluation for metrics in `eval_causal_lm_metrics`. 
-eval_causal_lm_metrics: # HF evaluate metrics used during evaluation. Default is ["sacrebleu", "comet", "ter", "chrf", "perplexity"] - -profiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir. - # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information - # snapshots can be visualized @ https://pytorch.org/memory_viz - -loss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training) -loss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3) - -# Save model as safetensors (require safetensors package) -save_safetensors: - -# Whether to mask out or include the human's prompt from the training labels -train_on_inputs: false -# Group similarly sized data to minimize padding. -# May be slower to start, as it must download and sort the entire dataset. -# Note that training loss may have an oscillating pattern with this enabled. -group_by_length: false - -# Whether to use gradient checkpointing. Available options are: true, false, "offload". -# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing -gradient_checkpointing: false -# additional kwargs to pass to the trainer for gradient checkpointing -# gradient_checkpointing_kwargs: -# use_reentrant: true - -# Stop training after this many evaluation losses have increased in a row -# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback -early_stopping_patience: 3 - -# Specify a scheduler and kwargs to use with the optimizer -lr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | empty for cosine -lr_scheduler_kwargs: -cosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr -cosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf) - -# For one_cycle optim -lr_div_factor: # Learning rate div factor - -# Specify optimizer -# Valid values are driven by the Transformers OptimizerNames class, see: -# https://github.com/huggingface/transformers/blob/cbf924b76c03828101a34069a96d209314114fd5/src/transformers/training_args.py#L144-L189 -# -# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of -# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. When in doubt, it is recommended to start with the optimizer used -# in the examples/ for your model and fine-tuning use case. 
-# -# Valid values for 'optimizer' include: -# - adamw_torch -# - adamw_torch_fused -# - adamw_torch_xla -# - adamw_torch_npu_fused -# - adamw_apex_fused -# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1) -# - adafactor -# - adamw_anyprecision -# - adamw_torch_4bit -# - ademamix -# - sgd -# - adagrad -# - adamw_bnb_8bit -# - adamw_8bit # alias for adamw_bnb_8bit -# - ademamix_8bit -# - lion_8bit -# - lion_32bit -# - paged_adamw_32bit -# - paged_adamw_8bit -# - paged_ademamix_32bit -# - paged_ademamix_8bit -# - paged_lion_32bit -# - paged_lion_8bit -# - rmsprop -# - rmsprop_bnb -# - rmsprop_bnb_8bit -# - rmsprop_bnb_32bit -# - galore_adamw -# - galore_adamw_8bit -# - galore_adafactor -# - galore_adamw_layerwise -# - galore_adamw_8bit_layerwise -# - galore_adafactor_layerwise -# - lomo -# - adalomo -# - grokadamw -# - schedule_free_adamw -# - schedule_free_sgd -# - apollo_adamw -# - apollo_adamw_layerwise -# -# Additional custom optimizers include: -# - optimi_adamw -# - ao_adamw_8bit -# - ao_adamw_fp8 -optimizer: -# Dictionary of arguments to pass to the optimizer -optim_args: -# For Galore Optimizers the following optim_args are available -# rank: # type: int -# update_proj_gap # type: int -# scale # type: float -# proj_type: # type: str, default = std - -# The target modules to optimize, i.e. the module names that you would like to train, right now this is used only for GaLore algorithm -optim_target_modules: -# - self_attn # for llama -# - mlp - -# Specify weight decay -weight_decay: -# adamw hyperparams -adam_beta1: -adam_beta2: -adam_epsilon: -# Gradient clipping max norm -max_grad_norm: - -# Augmentation techniques -# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings -# currently only supported on Llama and Mistral -neftune_noise_alpha: - -# Optional[bool]. Whether to bettertransformers -flash_optimum: - -# Note: Only one of the following attention patches can be used at a time. -# For example, if you set `xformers_attention` to `true`, do not set `flash_attention` to `true`. - -# Optional[bool]. Whether to use xformers attention patch https://github.com/facebookresearch/xformers: -xformers_attention: -# Optional[bool]. Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention: -flash_attention: -flash_attn_cross_entropy: # Optional[bool]. Whether to use flash-attention cross entropy implementation - advanced use only -flash_attn_rms_norm: # Optional[bool]. Whether to use flash-attention rms norm implementation - advanced use only -flash_attn_fuse_qkv: # Optional[bool]. Whether to fuse QKV into a single operation -flash_attn_fuse_mlp: # Optional[bool]. Whether to fuse part of the MLP into a single operation -# Optional[bool]. Whether to use scaled-dot-product attention -# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html -sdp_attention: -# Optional[bool]. Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf -s2_attention: - -# Optional[bool]. Whether to use low_cpu_mem_usage -low_cpu_mem_usage: -# Optional[str]. Resume from a specific checkpoint dir -resume_from_checkpoint: -# Optional[bool]. If resume_from_checkpoint isn't set and you simply want it to start where it left off. -# Be careful with this being turned on between different models. -auto_resume_from_checkpoints: false - -## Multimodal section -# int | tuple[int, int] | None . Size to resize images to, width x height. 
-# Will read from model/processor config if not set. -image_size: -# str. Algorithm to use for image resizing. "bilinear", "bicubic", "lanczos". Default is "bilinear". -image_resize_algorithm: 'bilinear' -## End of multimodal section - -# Don't mess with this, it's here for accelerate and torchrun -local_rank: - -# Add or change special tokens. -# If you add tokens here, you don't need to add them to the `tokens` list. -special_tokens: - # bos_token: "<s>" - # eos_token: "</s>" - # unk_token: "<unk>" - # pad_token: "[PAD]" - -# Add extra tokens. -tokens: - -# Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer. -# Only works for tokens that are not part of the base vocab (aka are added_tokens). -# Can be checked if they exist in tokenizer.json added_tokens. -added_tokens_overrides: # Dict[int, str] -# 128041: "<|im_start|>" -# 128042: "<|im_end|>" - -# FSDP -fsdp: -fsdp_config: - -# Deepspeed config path. e.g., deepspeed_configs/zero3.json -deepspeed: - -# Advanced DDP Arguments -ddp_timeout: -ddp_bucket_cap_mb: -ddp_broadcast_buffers: - -# Sequence parallelism -# Set to a divisor of the number of GPUs available to split sequences into chunks of equal size. -# Use in long context training to prevent OOM when sequences cannot fit into a single GPU's VRAM. -# E.g., if 4 GPUs are available, set this value to 2 to split each sequence into two equal-sized -# subsequences, or set to 4 to split into four equal-sized subsequences. -# See https://axolotl-ai-cloud.github.io/axolotl/docs/sequence_parallelism.html for more details. -sequence_parallel_degree: -# Optional; strides across the key dimension. Larger values use more memory but should make training faster. -# Must evenly divide the number of KV heads in your model. -heads_k_stride: 1 - -# Path to torch distx for optim 'adamw_anyprecision' -torchdistx_path: - -# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize -pretraining_dataset: - -# Debug mode -debug: - -# Seed -seed: - -# Allow overwrite yml config using from cli -strict: +# Training hyperparameters + +# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps. +gradient_accumulation_steps: 1 +# The number of samples to include in each batch. This is the number of samples sent to each GPU. +# Batch size per gpu = micro_batch_size * gradient_accumulation_steps +micro_batch_size: 2 +eval_batch_size: +num_epochs: 4 +warmup_steps: 100 # cannot use with warmup_ratio +warmup_ratio: 0.05 # cannot use with warmup_steps +learning_rate: 0.00003 +lr_quadratic_warmup: +logging_steps: +eval_steps: # Leave empty to eval at each epoch, integer for every N steps. float for fraction of total steps +evals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps +eval_strategy: # Set to `"no"` to skip evaluation, `"epoch"` at end of each epoch, leave empty to infer from `eval_steps`. +save_strategy: # Set to `"no"` to skip checkpoint saves, `"epoch"` at end of each epoch, `"best"` when better result is achieved, leave empty to infer from `save_steps`. +save_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps +saves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps +save_total_limit: # Checkpoints saved at a time +# Maximum number of iterations to train for. It precedes num_epochs which means that +# if both are set, num_epochs will not be guaranteed. 
+# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps +max_steps: + +# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time. +include_tokens_per_second: # Optional[bool] + +# whether to find batch size that fits in memory. Passed to underlying transformers Trainer +auto_find_batch_size: # Optional[bool] + +eval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0 +eval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128 +do_causal_lm_eval: # Whether to run causal language model evaluation for metrics in `eval_causal_lm_metrics`. +eval_causal_lm_metrics: # HF evaluate metrics used during evaluation. Default is ["sacrebleu", "comet", "ter", "chrf", "perplexity"] + +profiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir. + # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information + # snapshots can be visualized @ https://pytorch.org/memory_viz + +loss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training) +loss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3) + +# Save model as safetensors (require safetensors package) +save_safetensors: + +# Whether to mask out or include the human's prompt from the training labels +train_on_inputs: false +# Group similarly sized data to minimize padding. +# May be slower to start, as it must download and sort the entire dataset. +# Note that training loss may have an oscillating pattern with this enabled. +group_by_length: false + +# Whether to use gradient checkpointing. Available options are: true, false, "offload". +# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing +gradient_checkpointing: false +# additional kwargs to pass to the trainer for gradient checkpointing +# gradient_checkpointing_kwargs: +# use_reentrant: true + +# Stop training after this many evaluation losses have increased in a row +# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback +early_stopping_patience: 3 + +# Specify a scheduler and kwargs to use with the optimizer +lr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | empty for cosine +lr_scheduler_kwargs: +cosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr +cosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf) + +# For one_cycle optim +lr_div_factor: # Learning rate div factor + +# Specify optimizer +# Valid values are driven by the Transformers OptimizerNames class, see: +# https://github.com/huggingface/transformers/blob/cbf924b76c03828101a34069a96d209314114fd5/src/transformers/training_args.py#L144-L189 +# +# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of +# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. When in doubt, it is recommended to start with the optimizer used +# in the examples/ for your model and fine-tuning use case. 
+# +# Valid values for 'optimizer' include: +# - adamw_torch +# - adamw_torch_fused +# - adamw_torch_xla +# - adamw_torch_npu_fused +# - adamw_apex_fused +# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1) +# - adafactor +# - adamw_anyprecision +# - adamw_torch_4bit +# - ademamix +# - sgd +# - adagrad +# - adamw_bnb_8bit +# - adamw_8bit # alias for adamw_bnb_8bit +# - ademamix_8bit +# - lion_8bit +# - lion_32bit +# - paged_adamw_32bit +# - paged_adamw_8bit +# - paged_ademamix_32bit +# - paged_ademamix_8bit +# - paged_lion_32bit +# - paged_lion_8bit +# - rmsprop +# - rmsprop_bnb +# - rmsprop_bnb_8bit +# - rmsprop_bnb_32bit +# - galore_adamw +# - galore_adamw_8bit +# - galore_adafactor +# - galore_adamw_layerwise +# - galore_adamw_8bit_layerwise +# - galore_adafactor_layerwise +# - lomo +# - adalomo +# - grokadamw +# - schedule_free_adamw +# - schedule_free_sgd +# - apollo_adamw +# - apollo_adamw_layerwise +# +# Additional custom optimizers include: +# - optimi_adamw +# - ao_adamw_8bit +# - ao_adamw_fp8 +optimizer: +# Dictionary of arguments to pass to the optimizer +optim_args: +# For Galore Optimizers the following optim_args are available +# rank: # type: int +# update_proj_gap # type: int +# scale # type: float +# proj_type: # type: str, default = std + +# The target modules to optimize, i.e. the module names that you would like to train, right now this is used only for GaLore algorithm +optim_target_modules: +# - self_attn # for llama +# - mlp + +# Specify weight decay +weight_decay: +# adamw hyperparams +adam_beta1: +adam_beta2: +adam_epsilon: +# Gradient clipping max norm +max_grad_norm: + +# Augmentation techniques +# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings +# currently only supported on Llama and Mistral +neftune_noise_alpha: + +# Optional[bool]. Whether to bettertransformers +flash_optimum: + +# Note: Only one of the following attention patches can be used at a time. +# For example, if you set `xformers_attention` to `true`, do not set `flash_attention` to `true`. + +# Optional[bool]. Whether to use xformers attention patch https://github.com/facebookresearch/xformers: +xformers_attention: +# Optional[bool]. Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention: +flash_attention: +flash_attn_cross_entropy: # Optional[bool]. Whether to use flash-attention cross entropy implementation - advanced use only +flash_attn_rms_norm: # Optional[bool]. Whether to use flash-attention rms norm implementation - advanced use only +flash_attn_fuse_qkv: # Optional[bool]. Whether to fuse QKV into a single operation +flash_attn_fuse_mlp: # Optional[bool]. Whether to fuse part of the MLP into a single operation +# Optional[bool]. Whether to use scaled-dot-product attention +# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html +sdp_attention: +# Optional[bool]. Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf +s2_attention: + +# Optional[bool]. Whether to use low_cpu_mem_usage +low_cpu_mem_usage: +# Optional[str]. Resume from a specific checkpoint dir +resume_from_checkpoint: +# Optional[bool]. If resume_from_checkpoint isn't set and you simply want it to start where it left off. +# Be careful with this being turned on between different models. +auto_resume_from_checkpoints: false + +## Multimodal section +# int | tuple[int, int] | None . Size to resize images to, width x height. 
+# Will read from model/processor config if not set. +image_size: +# str. Algorithm to use for image resizing. "bilinear", "bicubic", "lanczos". Default is "bilinear". +image_resize_algorithm: 'bilinear' +## End of multimodal section + +# Don't mess with this, it's here for accelerate and torchrun +local_rank: + +# Add or change special tokens. +# If you add tokens here, you don't need to add them to the `tokens` list. +special_tokens: + # bos_token: "<s>" + # eos_token: "</s>" + # unk_token: "<unk>" + # pad_token: "[PAD]" + +# Add extra tokens. +tokens: + +# Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer. +# Only works for tokens that are not part of the base vocab (aka are added_tokens). +# Can be checked if they exist in tokenizer.json added_tokens. +added_tokens_overrides: # Dict[int, str] +# 128041: "<|im_start|>" +# 128042: "<|im_end|>" + +# FSDP +fsdp: +fsdp_config: + +# Deepspeed config path. e.g., deepspeed_configs/zero3.json +deepspeed: + +# Advanced DDP Arguments +ddp_timeout: +ddp_bucket_cap_mb: +ddp_broadcast_buffers: + +# Sequence parallelism +# Set to a divisor of the number of GPUs available to split sequences into chunks of equal size. +# Use in long context training to prevent OOM when sequences cannot fit into a single GPU's VRAM. +# E.g., if 4 GPUs are available, set this value to 2 to split each sequence into two equal-sized +# subsequences, or set to 4 to split into four equal-sized subsequences. +# See https://axolotl-ai-cloud.github.io/axolotl/docs/sequence_parallelism.html for more details. +sequence_parallel_degree: +# Optional; strides across the key dimension. Larger values use more memory but should make training faster. +# Must evenly divide the number of KV heads in your model. +heads_k_stride: 1 + +# Path to torch distx for optim 'adamw_anyprecision' +torchdistx_path: + +# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize +pretraining_dataset: + +# Debug mode +debug: + +# Seed +seed: + +# Allow overwrite yml config using from cli +strict: diff --git a/docs/custom_integrations.html b/docs/custom_integrations.html index 539246448..4633d43f8 100644 --- a/docs/custom_integrations.html +++ b/docs/custom_integrations.html @@ -326,6 +326,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/dataset-formats/conversation.html b/docs/dataset-formats/conversation.html index ad8456333..35e0cbde3 100644 --- a/docs/dataset-formats/conversation.html +++ b/docs/dataset-formats/conversation.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/dataset-formats/index.html b/docs/dataset-formats/index.html index bd6835822..ec712a1a6 100644 --- a/docs/dataset-formats/index.html +++ b/docs/dataset-formats/index.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + @@ -507,6 +513,20 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin

Axolotl is a training framework that aims to make the process convenient yet flexible: users simply pass a config YAML file.

As Axolotl offers a large number of options, this guide aims to simplify the experience of choosing the right ones.

Axolotl supports three kinds of training methods: pre-training, supervised fine-tuning, and preference-based post-training (e.g. DPO, ORPO, PRMs). Each method has its own dataset formats, which are described below.

+
+
+
+ +
+
+Tip +
+
+
+

This guide will mainly use JSONL as an introduction. Please refer to the dataset loading docs to understand how to load datasets from other sources.

+

For pretraining_dataset: specifically, please refer to the Pre-training section.

+
+

Pre-training

When aiming to train on large corpora of text, pre-training is your go-to choice. Due to the size of these datasets, downloading them in full before beginning training would be prohibitively time-consuming. Axolotl supports streaming, which loads only a batch at a time into memory.
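As a minimal sketch (the dataset repo below is a placeholder; the exact keys are covered in the Pre-training section), streaming pre-training is pointed at a dataset via pretraining_dataset rather than datasets, and because a streamed dataset has no known length, an explicit step count is usually set as well:

pretraining_dataset: org/large-text-corpus  # placeholder HF dataset repo, streamed instead of pre-tokenized
max_steps: 10000                            # assumed here: streamed data has no fixed epoch length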

diff --git a/docs/dataset-formats/inst_tune.html b/docs/dataset-formats/inst_tune.html index 4a944a763..7c081aeaf 100644 --- a/docs/dataset-formats/inst_tune.html +++ b/docs/dataset-formats/inst_tune.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/dataset-formats/pretraining.html b/docs/dataset-formats/pretraining.html index b98992bc8..932056ddf 100644 --- a/docs/dataset-formats/pretraining.html +++ b/docs/dataset-formats/pretraining.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/dataset-formats/stepwise_supervised.html b/docs/dataset-formats/stepwise_supervised.html index 86b46c8ff..4cdad9810 100644 --- a/docs/dataset-formats/stepwise_supervised.html +++ b/docs/dataset-formats/stepwise_supervised.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/dataset-formats/template_free.html b/docs/dataset-formats/template_free.html index b76cadfc9..2206b52d3 100644 --- a/docs/dataset-formats/template_free.html +++ b/docs/dataset-formats/template_free.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/dataset-formats/tokenized.html b/docs/dataset-formats/tokenized.html index a77a538d7..5f07f6657 100644 --- a/docs/dataset-formats/tokenized.html +++ b/docs/dataset-formats/tokenized.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/dataset_loading.html b/docs/dataset_loading.html new file mode 100644 index 000000000..439308f08 --- /dev/null +++ b/docs/dataset_loading.html @@ -0,0 +1,1205 @@ + + + + + + + + + + +Dataset Loading – Axolotl + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Dataset Loading

+
+ +
+
+ Understanding how to load datasets from different sources +
+
+ + +
+ + + + +
+ + + +
+ + +
+

Overview

+

Datasets can be loaded in a number of different ways, depending on how they are saved (the file extension) and where they are stored.

+
+
+

Loading Datasets

+

We use the datasets library, with a mix of load_dataset and load_from_disk, to load datasets.

+

You may recognize the similarly named options shared between load_dataset and the datasets section of the config file.

+
datasets:
+  - path:
+    name:
+    data_files:
+    split:
+    revision:
+    trust_remote_code:
+
+
+
+ +
+
+Tip +
+
+
+

Do not feel overwhelmed by the number of options here; most of them are optional. In fact, the most commonly used config is path, sometimes together with data_files.

+
+
+

This matches the API of datasets.load_dataset, so if you’re familiar with that, you will feel right at home.

+

For HuggingFace’s guide to loading different dataset types, see here.

+

For full details on the config, see config.qmd.

+
+
+
+ +
+
+Note +
+
+
+

You can set multiple datasets in the config file by adding more than one entry under datasets.

+
datasets:
+  - path: /path/to/your/dataset
+  - path: /path/to/your/other/dataset
+
+
+
+

Local dataset

+
+

Files

+

Usually, to load a JSON file, you would do something like this:

+
from datasets import load_dataset
+
+dataset = load_dataset("json", data_files="data.json")
+

This translates to the following config:

+
datasets:
+  - path: json
+    data_files: /path/to/your/file.jsonl
+

However, to make things easier, we have added a few shortcuts for loading local dataset files.

+

You can just point path to the file or directory, along with ds_type, to load the dataset. The example below shows this for a JSON file:

+
datasets:
+  - path: /path/to/your/file.jsonl
+    ds_type: json
+

This works for CSV, JSON, Parquet, and Arrow files.

+
+
+
+ +
+
+Tip +
+
+
+

If path points to a file and ds_type is not specified, we will automatically infer the dataset type from the file extension, so you could omit ds_type if you’d like.

+
+
+
+
+

Directory

+

If you’re loading a directory, you can point the path to the directory.

+

Then, you have two options:

+
+
Loading entire directory
+

You do not need any additional configs.

+

We will attempt to load in the following order:
- datasets saved with datasets.save_to_disk
- an entire directory of files (such as parquet/arrow files)

+
datasets:
+  - path: /path/to/your/directory
+
+
+
Loading specific files in directory
+

Provide data_files with a list of files to load.

+
datasets:
+    # single file
+  - path: /path/to/your/directory
+    ds_type: csv
+    data_files: file1.csv
+
+    # multiple files
+  - path: /path/to/your/directory
+    ds_type: json
+    data_files:
+      - file1.jsonl
+      - file2.jsonl
+
+    # multiple files for parquet
+  - path: /path/to/your/directory
+    ds_type: parquet
+    data_files:
+      - file1.parquet
+      - file2.parquet
+
+
+
+
+

HuggingFace Hub

+

The method you use to load the dataset depends on how it was created: whether a folder of files was uploaded directly, or a HuggingFace Dataset was pushed.

+
+
+
+ +
+
+Note +
+
+
+

If you’re using a private dataset, you will need to enable the hf_use_auth_token flag at the root level of the config file.

+
+
+
+
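As a minimal sketch of where the flag goes (org/private-dataset is a placeholder repo name):

hf_use_auth_token: true  # root-level flag; uses your HuggingFace token when fetching the private dataset
datasets:
  - path: org/private-dataset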

Folder uploaded

+

This means the dataset consists of one or more files uploaded directly to the Hub.

+
datasets:
+  - path: org/dataset-name
+    data_files:
+      - file1.jsonl
+      - file2.jsonl
+
+
+

HuggingFace Dataset

+

This means that the dataset was created as a HuggingFace Dataset and pushed to the Hub via datasets.push_to_hub.

+
datasets:
+  - path: org/dataset-name
+
+
+
+ +
+
+Note +
+
+
+

Some other configs may be required, such as name, split, revision, or trust_remote_code, depending on the dataset; a sketch follows below.

+
+
+
+
+
+
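As a rough sketch of what a fuller entry can look like (all values below are placeholders; only set the keys your dataset actually needs):

datasets:
  - path: org/dataset-name
    name: default            # dataset configuration name, if the repo defines one
    split: train
    revision: main           # commit hash, tag, or branch
    trust_remote_code: true  # only if the dataset ships custom loading code you trust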

Remote Filesystems

+

Via the storage_options config under load_dataset, you can load datasets from remote filesystems like S3, GCS, Azure, and OCI.

+
+
+
+ +
+
+Warning +
+
+
+

This is currently experimental. Please let us know if you run into any issues!

+
+
+

The only difference between the providers is that you need to prepend the path with the respective protocol prefix.

+
datasets:
+    # Single file
+  - path: s3://bucket-name/path/to/your/file.jsonl
+
+    # Directory
+  - path: s3://bucket-name/path/to/your/directory
+

For directories, we load via load_from_disk.

+
+
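For reference, a sketch of the same pattern with the other providers' protocol prefixes (bucket and container names are placeholders; see the provider-specific sections below for credential setup):

datasets:
  - path: gs://bucket-name/path/to/your/file.jsonl       # GCS (gs:// or gcs://)
  - path: abfs://container-name/path/to/your/file.jsonl  # Azure Gen 2 (abfs:// or az://)
  - path: oci://bucket-name/path/to/your/file.jsonl      # OCI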

S3

+

Prepend the path with s3://.

+

The credentials are pulled in the following order:

+
    +
  • AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and AWS_SESSION_TOKEN environment variables
  • +
  • from the ~/.aws/credentials file
  • +
  • for nodes on EC2, the IAM metadata provider
  • +
+
+
+
+ +
+
+Note +
+
+
+

We assume you have credentials set up and are not using anonymous access. If you want to use anonymous access, let us know! We may have to add a config option for this.

+
+
+

Other environment variables that can be set can be found in the boto3 docs.

+
+
+

GCS

+

Prepend the path with gs:// or gcs://.

+

The credentials are loaded in the following order:

+
    +
  • gcloud credentials
  • +
  • for nodes on GCP, the google metadata service
  • +
  • anonymous access
  • +
+
+
+

Azure

+
+
Gen 1
+

Prepend the path with adl://.

+

Ensure you have the following environment variables set:

+
    +
  • AZURE_STORAGE_TENANT_ID
  • +
  • AZURE_STORAGE_CLIENT_ID
  • +
  • AZURE_STORAGE_CLIENT_SECRET
  • +
+
+
+
Gen 2
+

Prepend the path with abfs:// or az://.

+

Ensure you have the following environment variables set:

+
    +
  • AZURE_STORAGE_ACCOUNT_NAME
  • +
  • AZURE_STORAGE_ACCOUNT_KEY
  • +
+

Other environment variables that can be set can be found in the adlfs docs.

+
+
+
+

OCI

+

Prepend the path with oci://.

+

It will attempt to read credentials in the following order:

+
    +
  • OCIFS_IAM_TYPE, OCIFS_CONFIG_LOCATION, and OCIFS_CONFIG_PROFILE environment variables
  • +
  • when on OCI resource, resource principal
  • +
+

Other environment variables:

+
    +
  • OCI_REGION_METADATA
  • +
+

Please see the ocifs docs.

+
+
+
+

HTTPS

+

The path should start with https://.

+
datasets:
+  - path: https://path/to/your/dataset/file.jsonl
+

This must be publicly accessible.

+
+
+
+

Next steps

+

Now that you know how to load datasets, you can learn how to map your specific dataset format to your target output format in the dataset formats docs.

+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/docs/dataset_preprocessing.html b/docs/dataset_preprocessing.html index 16a4c7e8a..81b311e15 100644 --- a/docs/dataset_preprocessing.html +++ b/docs/dataset_preprocessing.html @@ -293,6 +293,12 @@ ul.task-list li input[type="checkbox"] { LoRA Optimizations + + diff --git a/docs/debugging.html b/docs/debugging.html index afe13444f..68cfa0468 100644 --- a/docs/debugging.html +++ b/docs/debugging.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/docker.html b/docs/docker.html index 97d71320e..af5ef425a 100644 --- a/docs/docker.html +++ b/docs/docker.html @@ -326,6 +326,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/faq.html b/docs/faq.html index 7f5a61a2b..f9de392de 100644 --- a/docs/faq.html +++ b/docs/faq.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/fsdp_qlora.html b/docs/fsdp_qlora.html index b7297eb0d..b4a3cfc99 100644 --- a/docs/fsdp_qlora.html +++ b/docs/fsdp_qlora.html @@ -293,6 +293,12 @@ ul.task-list li input[type="checkbox"] { LoRA Optimizations + + diff --git a/docs/getting-started.html b/docs/getting-started.html index 9f7935db4..f61a1ceb5 100644 --- a/docs/getting-started.html +++ b/docs/getting-started.html @@ -326,6 +326,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/inference.html b/docs/inference.html index a07a334a9..1ed67aefe 100644 --- a/docs/inference.html +++ b/docs/inference.html @@ -326,6 +326,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/input_output.html b/docs/input_output.html index fd43af5d8..80234f56e 100644 --- a/docs/input_output.html +++ b/docs/input_output.html @@ -293,6 +293,12 @@ ul.task-list li input[type="checkbox"] { LoRA Optimizations + + diff --git a/docs/installation.html b/docs/installation.html index a2425e060..e353409f4 100644 --- a/docs/installation.html +++ b/docs/installation.html @@ -326,6 +326,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/lora_optims.html b/docs/lora_optims.html index b6512f832..ba96aa076 100644 --- a/docs/lora_optims.html +++ b/docs/lora_optims.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/lr_groups.html b/docs/lr_groups.html index 3b409408f..c0fbcec45 100644 --- a/docs/lr_groups.html +++ b/docs/lr_groups.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/mac.html b/docs/mac.html index 949f013df..701d32ea8 100644 --- a/docs/mac.html +++ b/docs/mac.html @@ -293,6 +293,12 @@ ul.task-list li input[type="checkbox"] { LoRA Optimizations + + diff --git a/docs/multi-gpu.html b/docs/multi-gpu.html index 0ae03c4a0..a918fca0a 100644 --- a/docs/multi-gpu.html +++ b/docs/multi-gpu.html @@ -326,6 +326,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/multi-node.html b/docs/multi-node.html index 887c3d604..a073f742b 100644 --- a/docs/multi-node.html +++ b/docs/multi-node.html @@ -327,6 +327,12 @@ pre 
> code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/multimodal.html b/docs/multimodal.html index 57779810d..1c2806bb5 100644 --- a/docs/multimodal.html +++ b/docs/multimodal.html @@ -326,6 +326,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/multipack.html b/docs/multipack.html index 96757f26b..00fcb06d2 100644 --- a/docs/multipack.html +++ b/docs/multipack.html @@ -293,6 +293,12 @@ ul.task-list li input[type="checkbox"] { LoRA Optimizations + + diff --git a/docs/nccl.html b/docs/nccl.html index b2caa2127..e6f33f88a 100644 --- a/docs/nccl.html +++ b/docs/nccl.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/ray-integration.html b/docs/ray-integration.html index c4e7c586a..c53fae744 100644 --- a/docs/ray-integration.html +++ b/docs/ray-integration.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/reward_modelling.html b/docs/reward_modelling.html index a0419956a..9d9728fcd 100644 --- a/docs/reward_modelling.html +++ b/docs/reward_modelling.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/rlhf.html b/docs/rlhf.html index 6de8c9ad9..0f653f4aa 100644 --- a/docs/rlhf.html +++ b/docs/rlhf.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/sequence_parallelism.html b/docs/sequence_parallelism.html index c089da968..be5320de0 100644 --- a/docs/sequence_parallelism.html +++ b/docs/sequence_parallelism.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/torchao.html b/docs/torchao.html index 4257dc05d..7c85fd826 100644 --- a/docs/torchao.html +++ b/docs/torchao.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/docs/unsloth.html b/docs/unsloth.html index 4a32b436d..158bb9c94 100644 --- a/docs/unsloth.html +++ b/docs/unsloth.html @@ -327,6 +327,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/examples/colab-notebooks/colab-axolotl-example.html b/examples/colab-notebooks/colab-axolotl-example.html index 81249f9e0..daffad8cc 100644 --- a/examples/colab-notebooks/colab-axolotl-example.html +++ b/examples/colab-notebooks/colab-axolotl-example.html @@ -355,6 +355,12 @@ window.Quarto = { LoRA Optimizations + + diff --git a/index.html b/index.html index f30b87345..6dbe1612d 100644 --- a/index.html +++ b/index.html @@ -326,6 +326,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin LoRA Optimizations + + diff --git a/search.json b/search.json index 765c049d7..be1b9ec67 100644 --- a/search.json +++ b/search.json @@ -152,7 +152,7 @@ "href": "docs/config.html", "title": "Config Reference", "section": "", - "text": "# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files\n# This can also be a relative path to a model on disk\nbase_model: ./llama-7b-hf\n# You can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, 
etc)\nbase_model_ignore_patterns:\n# If the base_model repo on hf hub doesn't include configuration .json files,\n# You can set that here, or leave this empty to default to base_model\nbase_model_config: ./llama-7b-hf\n# You can specify to choose a specific model revision from huggingface hub\nrevision_of_model:\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one defined in the base model\ntokenizer_config:\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too\nmodel_type: AutoModelForCausalLM\n# Corresponding tokenizer for the model AutoTokenizer is a good choice\ntokenizer_type: AutoTokenizer\n# Trust remote code for untrusted source\ntrust_remote_code:\n# use_fast option for tokenizer loading from_pretrained, default to True\ntokenizer_use_fast:\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy:\n# Resize the model embeddings when new tokens are added to multiples of 32\n# This is reported to improve training speed on some models\nresize_token_embeddings_to_32x:\n# Optional[bool] Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.\nshrink_embeddings:\n# Whether to load the model with randomly initialized weights. Useful for\n# pre-training a model from scratch or debugging purposes.\nrandom_init_weights:\n\n# (Internal use only)\n# Used to identify which the model is based on\nis_falcon_derived_model:\nis_llama_derived_model:\nis_qwen_derived_model:\n# Please note that if you set this to true, `padding_side` will be set to \"left\" by default\nis_mistral_derived_model:\n\n# optional overrides to the base model configuration\noverrides_of_model_config:\n # RoPE Scaling https://github.com/huggingface/transformers/pull/24653\n rope_scaling:\n type: # linear | dynamic\n factor: # float\n\n# optional overrides the base model loading from_pretrained\noverrides_of_model_kwargs:\n # use_cache: False\n\n# optional overrides to the bnb 4bit quantization configuration\n# https://huggingface.co/docs/transformers/main/main_classes/quantization#transformers.BitsAndBytesConfig\nbnb_config_kwargs:\n # These are default values\n llm_int8_has_fp16_weight: false\n bnb_4bit_quant_type: nf4\n bnb_4bit_use_double_quant: true\n\n\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: true\n\n# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer\nload_in_8bit: true\n# Use bitsandbytes 4 bit\nload_in_4bit:\n\n# Use CUDA bf16\nbf16: true # bool or 'full' for `bf16_full_eval`. require >=ampere\n# Use CUDA fp16\nfp16: true\n# Use CUDA tf32\ntf32: true # require >=ampere\n\n# No AMP (automatic mixed precision)\nbfloat16: true # require >=ampere\nfloat16: true\n\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in gigabytes); default: unset\ngpu_memory_limit: 20GiB\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge\nlora_on_cpu: true\n\n# List[str]. 
Add plugins to extend the pipeline.\n# See `src/axolotl/integrations` for the available plugins or doc below for more details.\n# https://axolotl-ai-cloud.github.io/axolotl/docs/custom_integrations.html\nplugins:\n # - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin\n\n# A list of one or more datasets to finetune the model with\ndatasets:\n # HuggingFace dataset repo | s3://,gs:// path | \"json\" for local dataset, make sure to fill data_files\n - path: vicgalle/alpaca-gpt4\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn>\n ds_type: # Optional[str] (json|arrow|parquet|text|csv) defines the datatype when path is a file\n data_files: # Optional[str] path to source data files\n\n shards: # Optional[int] split dataset into N pieces (use with shards_idx)\n shards_idx: # Optional[int] = 0 the index of sharded dataset to use\n\n preprocess_shards: # Optional[int] process dataset in N sequential chunks for memory efficiency (exclusive with `shards`)\n\n name: # Optional[str] name of dataset configuration to load\n train_on_split: train # Optional[str] name of dataset split to load from\n revision: # Optional[str] The specific revision of the dataset to use when loading from the Hugging Face Hub. This can be a commit hash, tag, or branch name. If not specified, the latest version will be used. This parameter is ignored for local datasets.\n trust_remote_code: # Optional[bool] Trust remote code for untrusted source\n\n # Custom user instruction prompt\n - path: repo\n type:\n # The below are defaults. only set what's needed if you use a different column name.\n system_prompt: \"\"\n system_format: \"{system}\"\n field_system: system\n field_instruction: instruction\n field_input: input\n field_output: output\n\n # Customizable to be single line or multi-line\n # Use {instruction}/{input} as key to be replaced\n # 'format' can include {input}\n format: |-\n User: {instruction} {input}\n Assistant:\n # 'no_input_format' cannot include {input}\n no_input_format: \"{instruction} \"\n\n # For `completion` datsets only, uses the provided field instead of `text` column\n field:\n\n # Using chat template\n - path: ...\n # Set type to `chat_template` to use this strategy\n type: chat_template\n # Specify the name of the chat template to use\n # The name of the chat template to use for training, following values are supported:\n # - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default.\n # - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n # - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to if the tokenizer does not have a chat template else default to tokenizer. E.g. tokenizer_default_fallback_chatml.\n # - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n chat_template: tokenizer_default\n\n # Custom jinja chat template. 
Used only if `chat_template: jinja` or empty.\n chat_template_jinja:\n\n # Key containing the messages (default: \"messages\")\n field_messages: messages\n\n # Mapping of properties from the input dataset to the chat template.\n # (default: message_property_mappings={'role':'role', 'content':'content'})\n # If a property exists in the template but not in this mapping, the system will attempt\n # to load it directly from the message using the property name as the key.\n # Example: In the mapping below, 'from' is loaded from input dataset and used as 'role',\n # while 'value' is loaded and used as 'content' in the chat template.\n message_property_mappings:\n role: from\n content: value\n # ...\n\n # Optional[Dict[str, List]]. Roles mapping in the messages. The default is:\n roles:\n user: [\"human\", \"user\"]\n assistant: [\"gpt\", \"assistant\"]\n system: [\"system\"]\n tool: [\"tool\"]\n\n # Optional[bool]. Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If you wish to,\n # we recommend using a custom jinja template with the default system message removed or\n # adding a system turn with empty content.\n drop_system_message:\n\n # IMPORTANT: The following fields determine which parts of the conversation to train on.\n # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train\n # See examples at `docs/dataset-formats/conversation.qmd`\n # Note: If the below 4 fields are set to empty, defaults to training only on the last message.\n\n # Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: [\"assistant\"] # default\n # Optional[str]. Which EOS tokens to train on in the conversation. Possible values are:\n # - all: train on all EOS tokens\n # - turn (default): train on the EOS token at the end of each trainable turn\n # - last: train on the last EOS token in the conversation\n # TIP: Please make sure that your `tokenizer.eos_token` is same as EOS/EOT token in template. Otherwise, set `eos_token` under `special_tokens`.\n train_on_eos: last\n # The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`.\n message_field_training: training\n # The key in the message turn that contains the training details. Useful to selectively train on certain tokens in a turn.\n # The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train).\n message_field_training_detail: train_detail\n\n\n# If false, the datasets will not be shuffled and will keep their original order in `datasets`.\n# The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true.\nshuffle_merged_datasets: true\n\nDeduplicates datasets and test_datasets with identical entries.\ndataset_exact_deduplication: true\n\n# A list of one or more datasets to eval the model with.\n# You can use either test_datasets, or val_set_size, but not both.\ntest_datasets:\n - path: /workspace/data/eval.jsonl\n ds_type: json\n # You need to specify a split. 
For \"json\" datasets the default split is called \"train\".\n split: train\n type: completion\n data_files:\n - /workspace/data/eval.jsonl\n\n# use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo'\nrl:\nrl_beta: # Optional[float]. The beta parameter for the RL training.\n\n# dpo\ndpo_use_weighting: # Optional[bool]. Whether to perform weighting.\nrpo_alpha: # Optional[float]. Weighting of NLL term in loss from RPO paper.\n\n# orpo\norpo_alpha: 0.1 # Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to `beta` in `ORPOConfig` due to trl mapping.\n\n# kto\nkto_desirable_weight: # Optional[float]. Factor for desirable loss term in KTO loss.\nkto_undesirable_weight: # Optional[float]. Factor for undesirable loss term in KTO loss.\n\n# simpo\ncpo_alpha: 1.0 # Weight of the BC regularizer\nsimpo_gamma: 0.5 # Target reward margin for the SimPO loss\n\n# grpo\ntrl:\n use_vllm: # Optional[bool]. Whether to use VLLM for RL training.\n vllm_server_host: # Optional[str]. Host of the vLLM server to connect to.\n vllm_server_port: # Optional[int]. Port of the vLLM server to connect to.\n vllm_server_timeout: # Optional[int]. Total timeout (in seconds) to wait for the vLLM server to respond.\n vllm_guided_decoding_regex: # Optional[str]. Regex for vLLM guided decoding.\n\n beta: # Optional[float]. Beta parameter for the RL training. Same as `rl_beta`. Use\n max_completion_length: # Optional[int]. Maximum length of the completion for RL training.\n\n reward_funcs: # Optional[list[str]]. List of reward functions to load. Paths must be importable from current dir.\n reward_weights: # Optional[list[float]]. List of reward weights for the reward functions.\n\n num_generations: # Optional[int]. Number of generations to sample.\n log_completions: # Optional[bool]. Whether to log completions.\n\n sync_ref_model: # Optional[bool]. Whether to sync the reference model.\n ref_model_mixup_alpha: # Optional[float]. Mixup alpha for the reference model.\n ref_model_sync_steps: # Optional[int]. Sync steps for the reference model.\n\n\n# reward modelling: `True` or `False`\nreward_model:\n\n# process reward modelling: `True` or `False`\nprocess_reward_model:\n\n# The name of the chat template to use for training, following values are supported:\n# - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default value.\n# - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n# - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not available in the tokenizer.\n# - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n# The selected chat template will be saved to the tokenizer_config.json for easier inferencing\n# Note: It is recommended to set train_on_inputs to true when using a chat template that is different from the model's default chat template.\nchat_template: tokenizer_default\n# custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null.\nchat_template_jinja: null\n# Changes the default system message. 
Currently only supports chatml.\ndefault_system_message: You are a helpful assistant. Please give a long and detailed answer.\n# Axolotl attempts to save the dataset as an arrow after packing the data together so\n# subsequent training attempts load faster, relative path\ndataset_prepared_path: data/last_run_prepared\n# Push prepared dataset to hub\npush_dataset_to_hub: # Optional[str] repo_org/repo_name\n# The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()`\n# if not set.\ndataset_processes: # defaults to os.cpu_count() if not set\n# Keep dataset in memory while preprocessing\n# Only needed if cached dataset is taking too much storage\ndataset_keep_in_memory:\n# push checkpoints to hub\nhub_model_id: # private repo path to push finetuned model\n# how to push checkpoints to hub\n# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy\nhub_strategy:\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets\n# Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: # boolean\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval.\nval_set_size: 0.04\n# Num shards for whole dataset\ndataset_shard_num:\n# Index of shard to use for whole dataset\ndataset_shard_idx:\n\n# The maximum length of an input to train with, this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: 2048\n# Pad inputs so each step uses constant sized buffers\n# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently\npad_to_sequence_len:\n# Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true'\nsample_packing:\n# Set to 'false' if getting errors during eval with sample_packing on.\neval_sample_packing:\n# You can set these packing optimizations AFTER starting a training at least once.\n# The trainer will provide recommended values for these values.\nsample_packing_eff_est:\ntotal_num_tokens:\n# Increasing the following values helps with packing, but usually only slightly (<%1.)\n# The number of samples packed at a time.\nsample_packing_group_size: 100000\n# The number of samples which can be packed into one sequence. Increase if using a large sequence_len with many short samples.\nsample_packing_bin_size: 200\nsample_pack_sequentially: # Optional[bool]. Whether to pack samples sequentially.\n\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation:\n\ncurriculum_sampling: # Optional[bool]. Whether to use sequential sampling for curriculum learning\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening:\n\n# Passed through to transformers when loading the model when launched without accelerate\n# Use `sequential` when training w/ model parallelism to limit memory\ndevice_map:\n# Defines the max memory usage per gpu on the system. 
Passed through to transformers when loading the model.\nmax_memory:\n\n# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model\nadapter: lora\n# If you already have a lora model trained that you want to load, put that here.\n# This means after training, if you want to test the model, you should set this to the value of `output_dir`.\n# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir:\n\n# LoRA hyperparameters\n# For more details about the following options, see:\n# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2\nlora_r: 8\nlora_alpha: 16\nlora_dropout: 0.05\nlora_target_modules:\n - q_proj\n - v_proj\n# - k_proj\n# - o_proj\n# - gate_proj\n# - down_proj\n# - up_proj\nlora_target_linear: # If true, will target all linear modules\n\n# List[int] | int. # The layer indices to transform, otherwise, apply to all layers\n# https://huggingface.co/docs/peft/v0.15.0/en/package_reference/lora#peft.LoraConfig.layers_to_transform\npeft_layers_to_transform:\n\n# Optional[bool]. Whether to use DoRA.\n# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#weight-decomposed-low-rank-adaptation-dora\npeft_use_dora:\n\n# Optional[bool]. Whether to use RSLoRA.\n# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#rank-stabilized-lora\npeft_use_rslora:\n\n# Optional[list[tuple[int, int]]]. List of layer indices to replicate.\n# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#memory-efficient-layer-replication-with-lora\npeft_layer_replication:\n\n# bool | Literal[\"gaussian\", \"eva\", \"olora\", \"pissa\", \"pissa_niter_[number of iters]\", \"corda\", \"loftq\"]\n# How to initialize LoRA weights. Default to True which is MS original implementation.\n# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#initialization\npeft_init_lora_weights:\n\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens.\n# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models.\n# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\n# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994\nlora_modules_to_save:\n# - embed_tokens\n# - lm_head\n\nlora_fan_in_fan_out: false\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for\n# speed and memory savings\n# See: https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html\nlora_mlp_kernel: true\nlora_qkv_kernel: true\nlora_o_kernel: true\n\n# LoRA+ hyperparameters\n# For more details about the following options, see:\n# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/train_builder.py`\nloraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.\nloraplus_lr_embedding: # loraplus learning rate for lora embedding layers. 
Default value is 1e-6.\n\npeft:\n # Configuration options for loftq initialization for LoRA\n # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization\n loftq_config:\n loftq_bits: # typically 4 bits\n\n# ReLoRA configuration\n# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed\nrelora_steps: # Number of steps per ReLoRA restart\nrelora_warmup_steps: # Number of per-restart warmup steps\nrelora_anneal_steps: # Number of anneal steps for each relora cycle\nrelora_prune_ratio: # threshold for optimizer magnitude when pruning\nrelora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings\n\n# wandb configuration if you're using it\n# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`.\nwandb_mode: # \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn off wandb\nwandb_project: # Your wandb project name\nwandb_entity: # A wandb Team name if using a Team\nwandb_watch:\nwandb_name: # Set the name of your wandb run\nwandb_run_id: # Set the ID of your wandb run\nwandb_log_model: # \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only at the end of training\n\n# mlflow configuration if you're using it\nmlflow_tracking_uri: # URI to mlflow\nmlflow_experiment_name: # Your experiment name\nmlflow_run_name: # Your run name\nhf_mlflow_log_artifacts: # set to true to copy each saved checkpoint on each save to mlflow artifact registry\n\n# Comet configuration if you're using it\n# Make sure your `COMET_API_KEY` environment variable is set (recommended) or you login to Comet with `comet login`.\n# Check out our documentation for more details https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start\nuse_comet: # Enable or disable Comet integration.\ncomet_api_key: # API key for Comet. Recommended to set via `comet login`.\ncomet_workspace: # Workspace name in Comet. Defaults to the user's default workspace.\ncomet_project_name: # Project name in Comet. Defaults to Uncategorized.\ncomet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key.\ncomet_mode: # Create a new experiment (\"create\") or log to an existing one (\"get\"). Default (\"get_or_create\") auto-selects based on configuration.\ncomet_online: # Set to True to log data to Comet server, or False for offline storage. Default is True.\ncomet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details.\n\n# Tensorboard\nuse_tensorboard: # Optional[bool]\n\n# Where to save the full-finetuned model to\noutput_dir: ./completed-model\n\n# Whether to use torch.compile and which backend to use\n# setting to `auto` will enable torch compile when torch>=2.5.1\ntorch_compile: # Optional[Union[Literal[\"auto\"], bool]]\ntorch_compile_backend: # Optional[str]\n\n# Training hyperparameters\n\n# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps.\ngradient_accumulation_steps: 1\n# The number of samples to include in each batch. 
This is the number of samples sent to each GPU.\n# Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: 2\neval_batch_size:\nnum_epochs: 4\nwarmup_steps: 100 # cannot use with warmup_ratio\nwarmup_ratio: 0.05 # cannot use with warmup_steps\nlearning_rate: 0.00003\nlr_quadratic_warmup:\nlogging_steps:\neval_steps: # Leave empty to eval at each epoch, integer for every N steps. float for fraction of total steps\nevals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps\neval_strategy: # Set to `\"no\"` to skip evaluation, `\"epoch\"` at end of each epoch, leave empty to infer from `eval_steps`.\nsave_strategy: # Set to `\"no\"` to skip checkpoint saves, `\"epoch\"` at end of each epoch, `\"best\"` when better result is achieved, leave empty to infer from `save_steps`.\nsave_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps\nsaves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsave_total_limit: # Checkpoints saved at a time\n# Maximum number of iterations to train for. It precedes num_epochs which means that\n# if both are set, num_epochs will not be guaranteed.\n# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps:\n\n# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time.\ninclude_tokens_per_second: # Optional[bool]\n\n# whether to find batch size that fits in memory. Passed to underlying transformers Trainer\nauto_find_batch_size: # Optional[bool]\n\neval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0\neval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128\ndo_causal_lm_eval: # Whether to run causal language model evaluation for metrics in `eval_causal_lm_metrics`.\neval_causal_lm_metrics: # HF evaluate metrics used during evaluation. Default is [\"sacrebleu\", \"comet\", \"ter\", \"chrf\", \"perplexity\"]\n\nprofiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir.\n # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information\n # snapshots can be visualized @ https://pytorch.org/memory_viz\n\nloss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training)\nloss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3)\n\n# Save model as safetensors (require safetensors package)\nsave_safetensors:\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: false\n# Group similarly sized data to minimize padding.\n# May be slower to start, as it must download and sort the entire dataset.\n# Note that training loss may have an oscillating pattern with this enabled.\ngroup_by_length: false\n\n# Whether to use gradient checkpointing. 
Available options are: true, false, \"offload\".\n# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: false\n# additional kwargs to pass to the trainer for gradient checkpointing\n# gradient_checkpointing_kwargs:\n# use_reentrant: true\n\n# Stop training after this many evaluation losses have increased in a row\n# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback\nearly_stopping_patience: 3\n\n# Specify a scheduler and kwargs to use with the optimizer\nlr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | empty for cosine\nlr_scheduler_kwargs:\ncosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr\ncosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)\n\n# For one_cycle optim\nlr_div_factor: # Learning rate div factor\n\n# Specify optimizer\n# Valid values are driven by the Transformers OptimizerNames class, see:\n# https://github.com/huggingface/transformers/blob/cbf924b76c03828101a34069a96d209314114fd5/src/transformers/training_args.py#L144-L189\n#\n# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of\n# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. When in doubt, it is recommended to start with the optimizer used\n# in the examples/ for your model and fine-tuning use case.\n#\n# Valid values for 'optimizer' include:\n# - adamw_torch\n# - adamw_torch_fused\n# - adamw_torch_xla\n# - adamw_torch_npu_fused\n# - adamw_apex_fused\n# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1)\n# - adafactor\n# - adamw_anyprecision\n# - adamw_torch_4bit\n# - ademamix\n# - sgd\n# - adagrad\n# - adamw_bnb_8bit\n# - adamw_8bit # alias for adamw_bnb_8bit\n# - ademamix_8bit\n# - lion_8bit\n# - lion_32bit\n# - paged_adamw_32bit\n# - paged_adamw_8bit\n# - paged_ademamix_32bit\n# - paged_ademamix_8bit\n# - paged_lion_32bit\n# - paged_lion_8bit\n# - rmsprop\n# - rmsprop_bnb\n# - rmsprop_bnb_8bit\n# - rmsprop_bnb_32bit\n# - galore_adamw\n# - galore_adamw_8bit\n# - galore_adafactor\n# - galore_adamw_layerwise\n# - galore_adamw_8bit_layerwise\n# - galore_adafactor_layerwise\n# - lomo\n# - adalomo\n# - grokadamw\n# - schedule_free_adamw\n# - schedule_free_sgd\n# - apollo_adamw\n# - apollo_adamw_layerwise\n#\n# Additional custom optimizers include:\n# - optimi_adamw\n# - ao_adamw_8bit\n# - ao_adamw_fp8\noptimizer:\n# Dictionary of arguments to pass to the optimizer\noptim_args:\n# For Galore Optimizers the following optim_args are available\n# rank: # type: int\n# update_proj_gap # type: int\n# scale # type: float\n# proj_type: # type: str, default = std\n\n# The target modules to optimize, i.e. the module names that you would like to train, right now this is used only for GaLore algorithm\noptim_target_modules:\n# - self_attn # for llama\n# - mlp\n\n# Specify weight decay\nweight_decay:\n# adamw hyperparams\nadam_beta1:\nadam_beta2:\nadam_epsilon:\n# Gradient clipping max norm\nmax_grad_norm:\n\n# Augmentation techniques\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings\n# currently only supported on Llama and Mistral\nneftune_noise_alpha:\n\n# Optional[bool]. 
Whether to bettertransformers\nflash_optimum:\n\n# Note: Only one of the following attention patches can be used at a time.\n# For example, if you set `xformers_attention` to `true`, do not set `flash_attention` to `true`.\n\n# Optional[bool]. Whether to use xformers attention patch https://github.com/facebookresearch/xformers:\nxformers_attention:\n# Optional[bool]. Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:\nflash_attention:\nflash_attn_cross_entropy: # Optional[bool]. Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_rms_norm: # Optional[bool]. Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_fuse_qkv: # Optional[bool]. Whether to fuse QKV into a single operation\nflash_attn_fuse_mlp: # Optional[bool]. Whether to fuse part of the MLP into a single operation\n# Optional[bool]. Whether to use scaled-dot-product attention\n# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention:\n# Optional[bool]. Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention:\n\n# Optional[bool]. Whether to use low_cpu_mem_usage\nlow_cpu_mem_usage:\n# Optional[str]. Resume from a specific checkpoint dir\nresume_from_checkpoint:\n# Optional[bool]. If resume_from_checkpoint isn't set and you simply want it to start where it left off.\n# Be careful with this being turned on between different models.\nauto_resume_from_checkpoints: false\n\n## Multimodal section\n# int | tuple[int, int] | None . Size to resize images to, width x height.\n# Will read from model/processor config if not set.\nimage_size:\n# str. Algorithm to use for image resizing. \"bilinear\", \"bicubic\", \"lanczos\". Default is \"bilinear\".\nimage_resize_algorithm: 'bilinear'\n## End of multimodal section\n\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank:\n\n# Add or change special tokens.\n# If you add tokens here, you don't need to add them to the `tokens` list.\nspecial_tokens:\n # bos_token: \"<s>\"\n # eos_token: \"</s>\"\n # unk_token: \"<unk>\"\n # pad_token: \"[PAD]\"\n\n# Add extra tokens.\ntokens:\n\n# Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer.\n# Only works for tokens that are not part of the base vocab (aka are added_tokens).\n# Can be checked if they exist in tokenizer.json added_tokens.\nadded_tokens_overrides: # Dict[int, str]\n# 128041: \"<|im_start|>\"\n# 128042: \"<|im_end|>\"\n\n# FSDP\nfsdp:\nfsdp_config:\n\n# Deepspeed config path. e.g., deepspeed_configs/zero3.json\ndeepspeed:\n\n# Advanced DDP Arguments\nddp_timeout:\nddp_bucket_cap_mb:\nddp_broadcast_buffers:\n\n# Sequence parallelism\n# Set to a divisor of the number of GPUs available to split sequences into chunks of equal size.\n# Use in long context training to prevent OOM when sequences cannot fit into a single GPU's VRAM.\n# E.g., if 4 GPUs are available, set this value to 2 to split each sequence into two equal-sized\n# subsequences, or set to 4 to split into four equal-sized subsequences.\n# See https://axolotl-ai-cloud.github.io/axolotl/docs/sequence_parallelism.html for more details.\nsequence_parallel_degree:\n# Optional; strides across the key dimension. 
Larger values use more memory but should make training faster.\n# Must evenly divide the number of KV heads in your model.\nheads_k_stride: 1\n\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path:\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize\npretraining_dataset:\n\n# Debug mode\ndebug:\n\n# Seed\nseed:\n\n# Allow overwrite yml config using from cli\nstrict:", + "text": "# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files\n# This can also be a relative path to a model on disk\nbase_model: ./llama-7b-hf\n# You can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, etc)\nbase_model_ignore_patterns:\n# If the base_model repo on hf hub doesn't include configuration .json files,\n# You can set that here, or leave this empty to default to base_model\nbase_model_config: ./llama-7b-hf\n# You can specify to choose a specific model revision from huggingface hub\nrevision_of_model:\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one defined in the base model\ntokenizer_config:\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too\nmodel_type: AutoModelForCausalLM\n# Corresponding tokenizer for the model AutoTokenizer is a good choice\ntokenizer_type: AutoTokenizer\n# Trust remote code for untrusted source\ntrust_remote_code:\n# use_fast option for tokenizer loading from_pretrained, default to True\ntokenizer_use_fast:\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy:\n# Resize the model embeddings when new tokens are added to multiples of 32\n# This is reported to improve training speed on some models\nresize_token_embeddings_to_32x:\n# Optional[bool] Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.\nshrink_embeddings:\n# Whether to load the model with randomly initialized weights. Useful for\n# pre-training a model from scratch or debugging purposes.\nrandom_init_weights:\n\n# (Internal use only)\n# Used to identify which the model is based on\nis_falcon_derived_model:\nis_llama_derived_model:\nis_qwen_derived_model:\n# Please note that if you set this to true, `padding_side` will be set to \"left\" by default\nis_mistral_derived_model:\n\n# optional overrides to the base model configuration\noverrides_of_model_config:\n # RoPE Scaling https://github.com/huggingface/transformers/pull/24653\n rope_scaling:\n type: # linear | dynamic\n factor: # float\n\n# optional overrides the base model loading from_pretrained\noverrides_of_model_kwargs:\n # use_cache: False\n\n# optional overrides to the bnb 4bit quantization configuration\n# https://huggingface.co/docs/transformers/main/main_classes/quantization#transformers.BitsAndBytesConfig\nbnb_config_kwargs:\n # These are default values\n llm_int8_has_fp16_weight: false\n bnb_4bit_quant_type: nf4\n bnb_4bit_use_double_quant: true\n\n\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: true\n\n# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer\nload_in_8bit: true\n# Use bitsandbytes 4 bit\nload_in_4bit:\n\n# Use CUDA bf16\nbf16: true # bool or 'full' for `bf16_full_eval`. 
require >=ampere\n# Use CUDA fp16\nfp16: true\n# Use CUDA tf32\ntf32: true # require >=ampere\n\n# No AMP (automatic mixed precision)\nbfloat16: true # require >=ampere\nfloat16: true\n\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in gigabytes); default: unset\ngpu_memory_limit: 20GiB\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge\nlora_on_cpu: true\n\n# List[str]. Add plugins to extend the pipeline.\n# See `src/axolotl/integrations` for the available plugins or doc below for more details.\n# https://axolotl-ai-cloud.github.io/axolotl/docs/custom_integrations.html\nplugins:\n # - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin\n\n# A list of one or more datasets to finetune the model with\ndatasets:\n # HuggingFace dataset repo | s3://,gs:// path | \"json\" for local dataset, make sure to fill data_files\n - path: vicgalle/alpaca-gpt4\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn>\n ds_type: # Optional[str] (json|arrow|parquet|text|csv) defines the datatype when path is a file\n data_files: # Optional[str] path to source data files\n\n shards: # Optional[int] split dataset into N pieces (use with shards_idx)\n shards_idx: # Optional[int] = 0 the index of sharded dataset to use\n\n preprocess_shards: # Optional[int] process dataset in N sequential chunks for memory efficiency (exclusive with `shards`)\n\n name: # Optional[str] name of dataset configuration to load\n split: train # Optional[str] name of dataset split to load from\n revision: # Optional[str] The specific revision of the dataset to use when loading from the Hugging Face Hub. This can be a commit hash, tag, or branch name. If not specified, the latest version will be used. This parameter is ignored for local datasets.\n trust_remote_code: # Optional[bool] Trust remote code for untrusted source\n\n # Custom user instruction prompt\n - path: repo\n type:\n # The below are defaults. only set what's needed if you use a different column name.\n system_prompt: \"\"\n system_format: \"{system}\"\n field_system: system\n field_instruction: instruction\n field_input: input\n field_output: output\n\n # Customizable to be single line or multi-line\n # Use {instruction}/{input} as key to be replaced\n # 'format' can include {input}\n format: |-\n User: {instruction} {input}\n Assistant:\n # 'no_input_format' cannot include {input}\n no_input_format: \"{instruction} \"\n\n # For `completion` datsets only, uses the provided field instead of `text` column\n field:\n\n # Using chat template\n - path: ...\n # Set type to `chat_template` to use this strategy\n type: chat_template\n # Specify the name of the chat template to use\n # The name of the chat template to use for training, following values are supported:\n # - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default.\n # - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n # - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to if the tokenizer does not have a chat template else default to tokenizer. E.g. 
tokenizer_default_fallback_chatml.\n # - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n chat_template: tokenizer_default\n\n # Custom jinja chat template. Used only if `chat_template: jinja` or empty.\n chat_template_jinja:\n\n # Key containing the messages (default: \"messages\")\n field_messages: messages\n\n # Mapping of properties from the input dataset to the chat template.\n # (default: message_property_mappings={'role':'role', 'content':'content'})\n # If a property exists in the template but not in this mapping, the system will attempt\n # to load it directly from the message using the property name as the key.\n # Example: In the mapping below, 'from' is loaded from input dataset and used as 'role',\n # while 'value' is loaded and used as 'content' in the chat template.\n message_property_mappings:\n role: from\n content: value\n # ...\n\n # Optional[Dict[str, List]]. Roles mapping in the messages.\n # The format is {target_role: [source_roles]}. All source roles will be mapped to the target role.\n # The default is:\n roles:\n user: [\"human\", \"user\"]\n assistant: [\"gpt\", \"assistant\"]\n system: [\"system\"]\n tool: [\"tool\"]\n\n # Optional[bool]. Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If you wish to,\n # we recommend using a custom jinja template with the default system message removed or\n # adding a system turn with empty content.\n drop_system_message:\n\n # IMPORTANT: The following fields determine which parts of the conversation to train on.\n # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train\n # See examples at `docs/dataset-formats/conversation.qmd`\n # Note: If the below 4 fields are set to empty, defaults to training only on the last message.\n\n # Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: [\"assistant\"] # default\n # Optional[str]. Which EOS tokens to train on in the conversation. Possible values are:\n # - all: train on all EOS tokens\n # - turn (default): train on the EOS token at the end of each trainable turn\n # - last: train on the last EOS token in the conversation\n # TIP: Please make sure that your `tokenizer.eos_token` is same as EOS/EOT token in template. Otherwise, set `eos_token` under `special_tokens`.\n train_on_eos: last\n # The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`.\n message_field_training: training\n # The key in the message turn that contains the training details. Useful to selectively train on certain tokens in a turn.\n # The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train).\n message_field_training_detail: train_detail\n\n\n# If false, the datasets will not be shuffled and will keep their original order in `datasets`.\n# The same applies to the `test_datasets` option and the `pretraining_dataset` option. 
Default is true.\nshuffle_merged_datasets: true\n\nDeduplicates datasets and test_datasets with identical entries.\ndataset_exact_deduplication: true\n\n# A list of one or more datasets to eval the model with.\n# You can use either test_datasets, or val_set_size, but not both.\ntest_datasets:\n - path: /workspace/data/eval.jsonl\n ds_type: json\n # You need to specify a split. For \"json\" datasets the default split is called \"train\".\n split: train\n type: completion\n data_files:\n - /workspace/data/eval.jsonl\n\n# use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo'\nrl:\nrl_beta: # Optional[float]. The beta parameter for the RL training.\n\n# dpo\ndpo_use_weighting: # Optional[bool]. Whether to perform weighting.\nrpo_alpha: # Optional[float]. Weighting of NLL term in loss from RPO paper.\n\n# orpo\norpo_alpha: 0.1 # Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to `beta` in `ORPOConfig` due to trl mapping.\n\n# kto\nkto_desirable_weight: # Optional[float]. Factor for desirable loss term in KTO loss.\nkto_undesirable_weight: # Optional[float]. Factor for undesirable loss term in KTO loss.\n\n# simpo\ncpo_alpha: 1.0 # Weight of the BC regularizer\nsimpo_gamma: 0.5 # Target reward margin for the SimPO loss\n\n# grpo\ntrl:\n use_vllm: # Optional[bool]. Whether to use VLLM for RL training.\n vllm_server_host: # Optional[str]. Host of the vLLM server to connect to.\n vllm_server_port: # Optional[int]. Port of the vLLM server to connect to.\n vllm_server_timeout: # Optional[int]. Total timeout (in seconds) to wait for the vLLM server to respond.\n vllm_guided_decoding_regex: # Optional[str]. Regex for vLLM guided decoding.\n\n beta: # Optional[float]. Beta parameter for the RL training. Same as `rl_beta`. Use\n max_completion_length: # Optional[int]. Maximum length of the completion for RL training.\n\n reward_funcs: # Optional[list[str]]. List of reward functions to load. Paths must be importable from current dir.\n reward_weights: # Optional[list[float]]. List of reward weights for the reward functions.\n\n num_generations: # Optional[int]. Number of generations to sample.\n log_completions: # Optional[bool]. Whether to log completions.\n\n sync_ref_model: # Optional[bool]. Whether to sync the reference model.\n ref_model_mixup_alpha: # Optional[float]. Mixup alpha for the reference model.\n ref_model_sync_steps: # Optional[int]. Sync steps for the reference model.\n\n\n# reward modelling: `True` or `False`\nreward_model:\n\n# process reward modelling: `True` or `False`\nprocess_reward_model:\n\n# The name of the chat template to use for training, following values are supported:\n# - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default value.\n# - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n# - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not available in the tokenizer.\n# - jinja: Uses a custom jinja template for the chat template. 
The custom jinja template should be provided in the chat_template_jinja field.\n# The selected chat template will be saved to the tokenizer_config.json for easier inferencing\n# Note: It is recommended to set train_on_inputs to true when using a chat template that is different from the model's default chat template.\nchat_template: tokenizer_default\n# custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null.\nchat_template_jinja: null\n# Changes the default system message. Currently only supports chatml.\ndefault_system_message: You are a helpful assistant. Please give a long and detailed answer.\n# Axolotl attempts to save the dataset as an arrow after packing the data together so\n# subsequent training attempts load faster, relative path\ndataset_prepared_path: data/last_run_prepared\n# Push prepared dataset to hub\npush_dataset_to_hub: # Optional[str] repo_org/repo_name\n# The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()`\n# if not set.\ndataset_processes: # defaults to os.cpu_count() if not set\n# Keep dataset in memory while preprocessing\n# Only needed if cached dataset is taking too much storage\ndataset_keep_in_memory:\n# push checkpoints to hub\nhub_model_id: # private repo path to push finetuned model\n# how to push checkpoints to hub\n# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy\nhub_strategy:\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets\n# Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: # boolean\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval.\nval_set_size: 0.04\n# Num shards for whole dataset\ndataset_shard_num:\n# Index of shard to use for whole dataset\ndataset_shard_idx:\n\n# The maximum length of an input to train with, this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: 2048\n# Pad inputs so each step uses constant sized buffers\n# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently\npad_to_sequence_len:\n# Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true'\nsample_packing:\n# Set to 'false' if getting errors during eval with sample_packing on.\neval_sample_packing:\n# You can set these packing optimizations AFTER starting a training at least once.\n# The trainer will provide recommended values for these values.\nsample_packing_eff_est:\ntotal_num_tokens:\n# Increasing the following values helps with packing, but usually only slightly (<%1.)\n# The number of samples packed at a time.\nsample_packing_group_size: 100000\n# The number of samples which can be packed into one sequence. Increase if using a large sequence_len with many short samples.\nsample_packing_bin_size: 200\nsample_pack_sequentially: # Optional[bool]. Whether to pack samples sequentially.\n\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation:\n\ncurriculum_sampling: # Optional[bool]. 
Whether to use sequential sampling for curriculum learning\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening:\n\n# Passed through to transformers when loading the model when launched without accelerate\n# Use `sequential` when training w/ model parallelism to limit memory\ndevice_map:\n# Defines the max memory usage per gpu on the system. Passed through to transformers when loading the model.\nmax_memory:\n\n# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model\nadapter: lora\n# If you already have a lora model trained that you want to load, put that here.\n# This means after training, if you want to test the model, you should set this to the value of `output_dir`.\n# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir:\n\n# LoRA hyperparameters\n# For more details about the following options, see:\n# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2\nlora_r: 8\nlora_alpha: 16\nlora_dropout: 0.05\nlora_target_modules:\n - q_proj\n - v_proj\n# - k_proj\n# - o_proj\n# - gate_proj\n# - down_proj\n# - up_proj\nlora_target_linear: # If true, will target all linear modules\n\n# List[int] | int. # The layer indices to transform, otherwise, apply to all layers\n# https://huggingface.co/docs/peft/v0.15.0/en/package_reference/lora#peft.LoraConfig.layers_to_transform\npeft_layers_to_transform:\n\n# Optional[bool]. Whether to use DoRA.\n# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#weight-decomposed-low-rank-adaptation-dora\npeft_use_dora:\n\n# Optional[bool]. Whether to use RSLoRA.\n# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#rank-stabilized-lora\npeft_use_rslora:\n\n# Optional[list[tuple[int, int]]]. List of layer indices to replicate.\n# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#memory-efficient-layer-replication-with-lora\npeft_layer_replication:\n\n# bool | Literal[\"gaussian\", \"eva\", \"olora\", \"pissa\", \"pissa_niter_[number of iters]\", \"corda\", \"loftq\"]\n# How to initialize LoRA weights. Default to True which is MS original implementation.\n# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#initialization\npeft_init_lora_weights:\n\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens.\n# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models.\n# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\n# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994\nlora_modules_to_save:\n# - embed_tokens\n# - lm_head\n\nlora_fan_in_fan_out: false\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for\n# speed and memory savings\n# See: https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html\nlora_mlp_kernel: true\nlora_qkv_kernel: true\nlora_o_kernel: true\n\n# LoRA+ hyperparameters\n# For more details about the following options, see:\n# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/train_builder.py`\nloraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.\nloraplus_lr_embedding: # loraplus learning rate for lora embedding layers. 
Default value is 1e-6.\n\npeft:\n # Configuration options for loftq initialization for LoRA\n # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization\n loftq_config:\n loftq_bits: # typically 4 bits\n\n# ReLoRA configuration\n# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed\nrelora_steps: # Number of steps per ReLoRA restart\nrelora_warmup_steps: # Number of per-restart warmup steps\nrelora_anneal_steps: # Number of anneal steps for each relora cycle\nrelora_prune_ratio: # threshold for optimizer magnitude when pruning\nrelora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings\n\n# wandb configuration if you're using it\n# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`.\nwandb_mode: # \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn off wandb\nwandb_project: # Your wandb project name\nwandb_entity: # A wandb Team name if using a Team\nwandb_watch:\nwandb_name: # Set the name of your wandb run\nwandb_run_id: # Set the ID of your wandb run\nwandb_log_model: # \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only at the end of training\n\n# mlflow configuration if you're using it\nmlflow_tracking_uri: # URI to mlflow\nmlflow_experiment_name: # Your experiment name\nmlflow_run_name: # Your run name\nhf_mlflow_log_artifacts: # set to true to copy each saved checkpoint on each save to mlflow artifact registry\n\n# Comet configuration if you're using it\n# Make sure your `COMET_API_KEY` environment variable is set (recommended) or you login to Comet with `comet login`.\n# Check out our documentation for more details https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start\nuse_comet: # Enable or disable Comet integration.\ncomet_api_key: # API key for Comet. Recommended to set via `comet login`.\ncomet_workspace: # Workspace name in Comet. Defaults to the user's default workspace.\ncomet_project_name: # Project name in Comet. Defaults to Uncategorized.\ncomet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key.\ncomet_mode: # Create a new experiment (\"create\") or log to an existing one (\"get\"). Default (\"get_or_create\") auto-selects based on configuration.\ncomet_online: # Set to True to log data to Comet server, or False for offline storage. Default is True.\ncomet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details.\n\n# Tensorboard\nuse_tensorboard: # Optional[bool]\n\n# Where to save the full-finetuned model to\noutput_dir: ./completed-model\n\n# Whether to use torch.compile and which backend to use\n# setting to `auto` will enable torch compile when torch>=2.5.1\ntorch_compile: # Optional[Union[Literal[\"auto\"], bool]]\ntorch_compile_backend: # Optional[str]\n\n# Training hyperparameters\n\n# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps.\ngradient_accumulation_steps: 1\n# The number of samples to include in each batch. 
This is the number of samples sent to each GPU.\n# Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: 2\neval_batch_size:\nnum_epochs: 4\nwarmup_steps: 100 # cannot use with warmup_ratio\nwarmup_ratio: 0.05 # cannot use with warmup_steps\nlearning_rate: 0.00003\nlr_quadratic_warmup:\nlogging_steps:\neval_steps: # Leave empty to eval at each epoch, integer for every N steps. float for fraction of total steps\nevals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps\neval_strategy: # Set to `\"no\"` to skip evaluation, `\"epoch\"` at end of each epoch, leave empty to infer from `eval_steps`.\nsave_strategy: # Set to `\"no\"` to skip checkpoint saves, `\"epoch\"` at end of each epoch, `\"best\"` when better result is achieved, leave empty to infer from `save_steps`.\nsave_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps\nsaves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsave_total_limit: # Checkpoints saved at a time\n# Maximum number of iterations to train for. It precedes num_epochs which means that\n# if both are set, num_epochs will not be guaranteed.\n# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps:\n\n# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time.\ninclude_tokens_per_second: # Optional[bool]\n\n# whether to find batch size that fits in memory. Passed to underlying transformers Trainer\nauto_find_batch_size: # Optional[bool]\n\neval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0\neval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128\ndo_causal_lm_eval: # Whether to run causal language model evaluation for metrics in `eval_causal_lm_metrics`.\neval_causal_lm_metrics: # HF evaluate metrics used during evaluation. Default is [\"sacrebleu\", \"comet\", \"ter\", \"chrf\", \"perplexity\"]\n\nprofiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir.\n # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information\n # snapshots can be visualized @ https://pytorch.org/memory_viz\n\nloss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training)\nloss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3)\n\n# Save model as safetensors (require safetensors package)\nsave_safetensors:\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: false\n# Group similarly sized data to minimize padding.\n# May be slower to start, as it must download and sort the entire dataset.\n# Note that training loss may have an oscillating pattern with this enabled.\ngroup_by_length: false\n\n# Whether to use gradient checkpointing. 
Available options are: true, false, \"offload\".\n# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: false\n# additional kwargs to pass to the trainer for gradient checkpointing\n# gradient_checkpointing_kwargs:\n# use_reentrant: true\n\n# Stop training after this many evaluation losses have increased in a row\n# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback\nearly_stopping_patience: 3\n\n# Specify a scheduler and kwargs to use with the optimizer\nlr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | empty for cosine\nlr_scheduler_kwargs:\ncosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr\ncosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)\n\n# For one_cycle optim\nlr_div_factor: # Learning rate div factor\n\n# Specify optimizer\n# Valid values are driven by the Transformers OptimizerNames class, see:\n# https://github.com/huggingface/transformers/blob/cbf924b76c03828101a34069a96d209314114fd5/src/transformers/training_args.py#L144-L189\n#\n# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of\n# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. When in doubt, it is recommended to start with the optimizer used\n# in the examples/ for your model and fine-tuning use case.\n#\n# Valid values for 'optimizer' include:\n# - adamw_torch\n# - adamw_torch_fused\n# - adamw_torch_xla\n# - adamw_torch_npu_fused\n# - adamw_apex_fused\n# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1)\n# - adafactor\n# - adamw_anyprecision\n# - adamw_torch_4bit\n# - ademamix\n# - sgd\n# - adagrad\n# - adamw_bnb_8bit\n# - adamw_8bit # alias for adamw_bnb_8bit\n# - ademamix_8bit\n# - lion_8bit\n# - lion_32bit\n# - paged_adamw_32bit\n# - paged_adamw_8bit\n# - paged_ademamix_32bit\n# - paged_ademamix_8bit\n# - paged_lion_32bit\n# - paged_lion_8bit\n# - rmsprop\n# - rmsprop_bnb\n# - rmsprop_bnb_8bit\n# - rmsprop_bnb_32bit\n# - galore_adamw\n# - galore_adamw_8bit\n# - galore_adafactor\n# - galore_adamw_layerwise\n# - galore_adamw_8bit_layerwise\n# - galore_adafactor_layerwise\n# - lomo\n# - adalomo\n# - grokadamw\n# - schedule_free_adamw\n# - schedule_free_sgd\n# - apollo_adamw\n# - apollo_adamw_layerwise\n#\n# Additional custom optimizers include:\n# - optimi_adamw\n# - ao_adamw_8bit\n# - ao_adamw_fp8\noptimizer:\n# Dictionary of arguments to pass to the optimizer\noptim_args:\n# For Galore Optimizers the following optim_args are available\n# rank: # type: int\n# update_proj_gap # type: int\n# scale # type: float\n# proj_type: # type: str, default = std\n\n# The target modules to optimize, i.e. the module names that you would like to train, right now this is used only for GaLore algorithm\noptim_target_modules:\n# - self_attn # for llama\n# - mlp\n\n# Specify weight decay\nweight_decay:\n# adamw hyperparams\nadam_beta1:\nadam_beta2:\nadam_epsilon:\n# Gradient clipping max norm\nmax_grad_norm:\n\n# Augmentation techniques\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings\n# currently only supported on Llama and Mistral\nneftune_noise_alpha:\n\n# Optional[bool]. 
Whether to bettertransformers\nflash_optimum:\n\n# Note: Only one of the following attention patches can be used at a time.\n# For example, if you set `xformers_attention` to `true`, do not set `flash_attention` to `true`.\n\n# Optional[bool]. Whether to use xformers attention patch https://github.com/facebookresearch/xformers:\nxformers_attention:\n# Optional[bool]. Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:\nflash_attention:\nflash_attn_cross_entropy: # Optional[bool]. Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_rms_norm: # Optional[bool]. Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_fuse_qkv: # Optional[bool]. Whether to fuse QKV into a single operation\nflash_attn_fuse_mlp: # Optional[bool]. Whether to fuse part of the MLP into a single operation\n# Optional[bool]. Whether to use scaled-dot-product attention\n# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention:\n# Optional[bool]. Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention:\n\n# Optional[bool]. Whether to use low_cpu_mem_usage\nlow_cpu_mem_usage:\n# Optional[str]. Resume from a specific checkpoint dir\nresume_from_checkpoint:\n# Optional[bool]. If resume_from_checkpoint isn't set and you simply want it to start where it left off.\n# Be careful with this being turned on between different models.\nauto_resume_from_checkpoints: false\n\n## Multimodal section\n# int | tuple[int, int] | None . Size to resize images to, width x height.\n# Will read from model/processor config if not set.\nimage_size:\n# str. Algorithm to use for image resizing. \"bilinear\", \"bicubic\", \"lanczos\". Default is \"bilinear\".\nimage_resize_algorithm: 'bilinear'\n## End of multimodal section\n\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank:\n\n# Add or change special tokens.\n# If you add tokens here, you don't need to add them to the `tokens` list.\nspecial_tokens:\n # bos_token: \"<s>\"\n # eos_token: \"</s>\"\n # unk_token: \"<unk>\"\n # pad_token: \"[PAD]\"\n\n# Add extra tokens.\ntokens:\n\n# Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer.\n# Only works for tokens that are not part of the base vocab (aka are added_tokens).\n# Can be checked if they exist in tokenizer.json added_tokens.\nadded_tokens_overrides: # Dict[int, str]\n# 128041: \"<|im_start|>\"\n# 128042: \"<|im_end|>\"\n\n# FSDP\nfsdp:\nfsdp_config:\n\n# Deepspeed config path. e.g., deepspeed_configs/zero3.json\ndeepspeed:\n\n# Advanced DDP Arguments\nddp_timeout:\nddp_bucket_cap_mb:\nddp_broadcast_buffers:\n\n# Sequence parallelism\n# Set to a divisor of the number of GPUs available to split sequences into chunks of equal size.\n# Use in long context training to prevent OOM when sequences cannot fit into a single GPU's VRAM.\n# E.g., if 4 GPUs are available, set this value to 2 to split each sequence into two equal-sized\n# subsequences, or set to 4 to split into four equal-sized subsequences.\n# See https://axolotl-ai-cloud.github.io/axolotl/docs/sequence_parallelism.html for more details.\nsequence_parallel_degree:\n# Optional; strides across the key dimension. 
Larger values use more memory but should make training faster.\n# Must evenly divide the number of KV heads in your model.\nheads_k_stride: 1\n\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path:\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize\npretraining_dataset:\n\n# Debug mode\ndebug:\n\n# Seed\nseed:\n\n# Allow overwrite yml config using from cli\nstrict:", "crumbs": [ "Getting Started", "Config Reference" @@ -1390,102 +1390,337 @@ ] }, { - "objectID": "docs/inference.html", - "href": "docs/inference.html", - "title": "Inference and Merging", + "objectID": "docs/dataset_loading.html", + "href": "docs/dataset_loading.html", + "title": "Dataset Loading", "section": "", - "text": "This guide covers how to use your trained models for inference, including model loading, interactive testing, merging adapters, and common troubleshooting steps.", + "text": "Datasets can be loaded in a number of different ways depending on the how it is saved (the extension of the file) and where it is stored.", "crumbs": [ - "Getting Started", - "Inference and Merging" + "How To Guides", + "Dataset Loading" ] }, { - "objectID": "docs/inference.html#sec-quickstart", - "href": "docs/inference.html#sec-quickstart", - "title": "Inference and Merging", - "section": "1 Quick Start", - "text": "1 Quick Start\n\n\n\n\n\n\nTip\n\n\n\nUse the same config used for training on inference/merging.\n\n\n\n1.1 Basic Inference\n\nLoRA ModelsFull Fine-tuned Models\n\n\naxolotl inference your_config.yml --lora-model-dir=\"./lora-output-dir\"\n\n\naxolotl inference your_config.yml --base-model=\"./completed-model\"", - "crumbs": [ - "Getting Started", - "Inference and Merging" - ] - }, - { - "objectID": "docs/inference.html#sec-advanced", - "href": "docs/inference.html#sec-advanced", - "title": "Inference and Merging", - "section": "2 Advanced Usage", - "text": "2 Advanced Usage\n\n2.1 Gradio Interface\nLaunch an interactive web interface:\naxolotl inference your_config.yml --gradio\n\n\n2.2 File-based Prompts\nProcess prompts from a text file:\ncat /tmp/prompt.txt | axolotl inference your_config.yml \\\n --base-model=\"./completed-model\" --prompter=None\n\n\n2.3 Memory Optimization\nFor large models or limited memory:\naxolotl inference your_config.yml --load-in-8bit=True", - "crumbs": [ - "Getting Started", - "Inference and Merging" - ] - }, - { - "objectID": "docs/inference.html#sec-merging", - "href": "docs/inference.html#sec-merging", - "title": "Inference and Merging", - "section": "3 Merging LoRA Weights", - "text": "3 Merging LoRA Weights\nMerge LoRA adapters with the base model:\naxolotl merge-lora your_config.yml --lora-model-dir=\"./completed-model\"\n\n3.1 Memory Management for Merging\n\nConfiguration OptionsForce CPU Merging\n\n\ngpu_memory_limit: 20GiB # Adjust based on your GPU\nlora_on_cpu: true # Process on CPU if needed\n\n\nCUDA_VISIBLE_DEVICES=\"\" axolotl merge-lora ...", - "crumbs": [ - "Getting Started", - "Inference and Merging" - ] - }, - { - "objectID": "docs/inference.html#sec-tokenization", - "href": "docs/inference.html#sec-tokenization", - "title": "Inference and Merging", - "section": "4 Tokenization", - "text": "4 Tokenization\n\n4.1 Common Issues\n\n\n\n\n\n\nWarning\n\n\n\nTokenization mismatches between training and inference are a common source of problems.\n\n\nTo debug:\n\nCheck training tokenization:\n\naxolotl preprocess your_config.yml --debug\n\nVerify inference tokenization by decoding tokens before model input\nCompare token IDs 
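As a rough illustration of how the warmup_ratio, num_epochs, and batch-size settings in the config reference above interact, the sketch below computes the warmup step count and builds a cosine schedule with the Hugging Face helper. The dataset size, learning rate, and stand-in model are placeholder assumptions, not Axolotl internals.

# Illustrative only: how warmup_ratio and a cosine lr_scheduler translate into
# concrete step counts. All numbers are assumed placeholders.
import torch
from transformers import get_cosine_schedule_with_warmup

dataset_size = 10_000                # assumption: number of training samples
micro_batch_size = 2
gradient_accumulation_steps = 4
num_gpus = 1
num_epochs = 4
warmup_ratio = 0.05                  # mutually exclusive with warmup_steps

effective_batch_size = micro_batch_size * gradient_accumulation_steps * num_gpus
total_steps = (dataset_size // effective_batch_size) * num_epochs
warmup_steps = int(total_steps * warmup_ratio)

model = torch.nn.Linear(8, 8)        # stand-in for the real model
optimizer = torch.optim.AdamW(model.parameters(), lr=3e-5)
scheduler = get_cosine_schedule_with_warmup(
    optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps
)
print(total_steps, warmup_steps)     # 5000 training steps, 250 of them warmup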
between training and inference\n\n\n\n4.2 Special Tokens\nConfigure special tokens in your YAML:\nspecial_tokens:\n bos_token: \"<s>\"\n eos_token: \"</s>\"\n unk_token: \"<unk>\"\ntokens:\n - \"<|im_start|>\"\n - \"<|im_end|>\"", - "crumbs": [ - "Getting Started", - "Inference and Merging" - ] - }, - { - "objectID": "docs/inference.html#sec-troubleshooting", - "href": "docs/inference.html#sec-troubleshooting", - "title": "Inference and Merging", - "section": "5 Troubleshooting", - "text": "5 Troubleshooting\n\n5.1 Common Problems\n\nMemory IssuesToken IssuesPerformance Issues\n\n\n\nUse 8-bit loading\nReduce batch sizes\nTry CPU offloading\n\n\n\n\nVerify special tokens\nCheck tokenizer settings\nCompare training and inference preprocessing\n\n\n\n\nVerify model loading\nCheck prompt formatting\nEnsure temperature/sampling settings\n\n\n\n\nFor more details, see our debugging guide.", - "crumbs": [ - "Getting Started", - "Inference and Merging" - ] - }, - { - "objectID": "docs/multipack.html", - "href": "docs/multipack.html", - "title": "Multipack (Sample Packing)", + "objectID": "docs/dataset_loading.html#overview", + "href": "docs/dataset_loading.html#overview", + "title": "Dataset Loading", "section": "", - "text": "Because Flash Attention simply drops the attention mask, we do not need to\nconstruct a 4d attention mask. We only need to concatenate the sequences into\na single batch and let flash attention know where each new sequence begins.\n4k context, bsz =4,\neach character represents 256 tokens\nX represents a padding token\n 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5\n[[ A A A A A A A A A A A ]\n B B B B B B ]\n C C C C C C C ]\n D D D D ]]\n\n[[ E E E E E E E E ]\n [ F F F F ]\n [ G G G ]\n [ H H H H ]]\n\n[[ I I I ]\n [ J J J ]\n [ K K K K K]\n [ L L L ]]\nafter padding to longest input in each step\n 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5\n[[ A A A A A A A A A A A ]\n B B B B B B X X X X X X ]\n C C C C C C C X X X X ]\n D D D D X X X X X X X ]]\n\n[[ E E E E E E E E ]\n [ F F F F X X X X ]\n [ G G G X X X X X ]\n [ H H H H X X X X ]]\n\n[[ I I I X X ]\n [ J J J X X ]\n [ K K K K K ]\n [ L L L X X ]]\nw packing ( note it’s the same effective number of tokens per step, but a true bsz of 1)\n 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5\n[[ A A A A A A A A A A A B B B B B\n B C C C C C C C D D D D E E E E\n E E E E F F F F F G G G H H H H\n I I I J J J J K K K K K L L L X ]]\ncu_seqlens:\n[[ 0, 11, 17, 24, 28, 36, 41 44, 48, 51, 55, 60, 64]]", + "text": "Datasets can be loaded in a number of different ways depending on the how it is saved (the extension of the file) and where it is stored.", + "crumbs": [ + "How To Guides", + "Dataset Loading" + ] + }, + { + "objectID": "docs/dataset_loading.html#loading-datasets", + "href": "docs/dataset_loading.html#loading-datasets", + "title": "Dataset Loading", + "section": "Loading Datasets", + "text": "Loading Datasets\nWe use the datasets library to load datasets and a mix of load_dataset and load_from_disk to load them.\nYou may recognize the similar named configs between load_dataset and the datasets section of the config file.\ndatasets:\n - path:\n name:\n data_files:\n split:\n revision:\n trust_remote_code:\n\n\n\n\n\n\nTip\n\n\n\nDo not feel overwhelmed by the number of options here. A lot of them are optional. 
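To make the correspondence with load_dataset concrete, here is a hypothetical Hub dataset entry and the roughly equivalent call; the repo name, config name, and revision are made-up placeholders.

# datasets:
#   - path: org/dataset-name        # hypothetical Hub repo
#     name: default                 # dataset config name
#     split: train
#     revision: main
# ...corresponds roughly to:
from datasets import load_dataset

dataset = load_dataset(
    "org/dataset-name",   # path
    "default",            # name
    split="train",
    revision="main",
)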
In fact, the most common config to use would be path and sometimes data_files.\n\n\nThis matches the API of datasets.load_dataset, so if you’re familiar with that, you will feel right at home.\nFor HuggingFace’s guide to load different dataset types, see here.\nFor full details on the config, see config.qmd.\n\n\n\n\n\n\nNote\n\n\n\nYou can set multiple datasets in the config file by more than one entry under datasets.\ndatasets:\n - path: /path/to/your/dataset\n - path: /path/to/your/other/dataset\n\n\n\nLocal dataset\n\nFiles\nUsually, to load a JSON file, you would do something like this:\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"json\", data_files=\"data.json\")\nWhich translates to the following config:\ndatasets:\n - path: json\n data_files: /path/to/your/file.jsonl\nHowever, to make things easier, we have added a few shortcuts for loading local dataset files.\nYou can just point the path to the file or directory along with the ds_type to load the dataset. The below example shows for a JSON file:\ndatasets:\n - path: /path/to/your/file.jsonl\n ds_type: json\nThis works for CSV, JSON, Parquet, and Arrow files.\n\n\n\n\n\n\nTip\n\n\n\nIf path points to a file and ds_type is not specified, we will automatically infer the dataset type from the file extension, so you could omit ds_type if you’d like.\n\n\n\n\nDirectory\nIf you’re loading a directory, you can point the path to the directory.\nThen, you have two options:\n\nLoading entire directory\nYou do not need any additional configs.\nWe will attempt to load in the following order:\n- datasets saved with datasets.save_to_disk\n- loading entire directory of files (such as with parquet/arrow files)\ndatasets:\n - path: /path/to/your/directory\n\n\nLoading specific files in directory\nProvide data_files with a list of files to load.\ndatasets:\n # single file\n - path: /path/to/your/directory\n ds_type: csv\n data_files: file1.csv\n\n # multiple files\n - path: /path/to/your/directory\n ds_type: json\n data_files:\n - file1.jsonl\n - file2.jsonl\n\n # multiple files for parquet\n - path: /path/to/your/directory\n ds_type: parquet\n data_files:\n - file1.parquet\n - file2.parquet\n\n\n\n\nHuggingFace Hub\nThe method you use to load the dataset depends on how the dataset was created, whether a folder was uploaded directly or a HuggingFace Dataset was pushed.\n\n\n\n\n\n\nNote\n\n\n\nIf you’re using a private dataset, you will need to enable the hf_use_auth_token flag in the root-level of the config file.\n\n\n\nFolder uploaded\nThis would mean that the dataset is a single file or file(s) uploaded to the Hub.\ndatasets:\n - path: org/dataset-name\n data_files:\n - file1.jsonl\n - file2.jsonl\n\n\nHuggingFace Dataset\nThis means that the dataset is created as a HuggingFace Dataset and pushed to the Hub via datasets.push_to_hub.\ndatasets:\n - path: org/dataset-name\n\n\n\n\n\n\nNote\n\n\n\nThere are some other configs which may be required like name, split, revision, trust_remote_code, etc depending on the dataset.\n\n\n\n\n\nRemote Filesystems\nVia the storage_options config under load_dataset, you can load datasets from remote filesystems like S3, GCS, Azure, and OCI.\n\n\n\n\n\n\nWarning\n\n\n\nThis is currently experimental. 
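For the remote filesystems covered in this section, the config maps, roughly, onto passing fsspec-style storage_options through to the datasets loaders. The sketch below is an assumed illustration for S3, not Axolotl internals; the bucket and credential values are placeholders and the s3fs package must be installed.

# Assumed illustration: loading from S3 by forwarding fsspec storage options.
from datasets import load_dataset, load_from_disk

storage_options = {
    "key": "<AWS_ACCESS_KEY_ID>",        # placeholders; normally the credential
    "secret": "<AWS_SECRET_ACCESS_KEY>", # chain described below is used instead
}

# Single file
ds = load_dataset(
    "json",
    data_files="s3://bucket-name/path/to/your/file.jsonl",
    storage_options=storage_options,
)

# Directory previously written with datasets.save_to_disk
ds_dir = load_from_disk(
    "s3://bucket-name/path/to/your/directory",
    storage_options=storage_options,
)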
Please let us know if you run into any issues!\n\n\nThe only difference between the providers is that you need to prepend the path with the respective protocols.\ndatasets:\n # Single file\n - path: s3://bucket-name/path/to/your/file.jsonl\n\n # Directory\n - path: s3://bucket-name/path/to/your/directory\nFor directory, we load via load_from_disk.\n\nS3\nPrepend the path with s3://.\nThe credentials are pulled in the following order:\n\nAWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and AWS_SESSION_TOKEN environment variables\nfrom the ~/.aws/credentials file\nfor nodes on EC2, the IAM metadata provider\n\n\n\n\n\n\n\nNote\n\n\n\nWe assume you have credentials setup and not using anonymous access. If you want to use anonymous access, let us know! We may have to open a config option for this.\n\n\nOther environment variables that can be set can be found in boto3 docs\n\n\nGCS\nPrepend the path with gs:// or gcs://.\nThe credentials are loaded in the following order:\n\ngcloud credentials\nfor nodes on GCP, the google metadata service\nanonymous access\n\n\n\nAzure\n\nGen 1\nPrepend the path with adl://.\nEnsure you have the following environment variables set:\n\nAZURE_STORAGE_TENANT_ID\nAZURE_STORAGE_CLIENT_ID\nAZURE_STORAGE_CLIENT_SECRET\n\n\n\nGen 2\nPrepend the path with abfs:// or az://.\nEnsure you have the following environment variables set:\n\nAZURE_STORAGE_ACCOUNT_NAME\nAZURE_STORAGE_ACCOUNT_KEY\n\nOther environment variables that can be set can be found in adlfs docs\n\n\n\nOCI\nPrepend the path with oci://.\nIt would attempt to read in the following order:\n\nOCIFS_IAM_TYPE, OCIFS_CONFIG_LOCATION, and OCIFS_CONFIG_PROFILE environment variables\nwhen on OCI resource, resource principal\n\nOther environment variables:\n\nOCI_REGION_METADATA\n\nPlease see the ocifs docs.\n\n\n\nHTTPS\nThe path should start with https://.\ndatasets:\n - path: https://path/to/your/dataset/file.jsonl\nThis must be publically accessible.", + "crumbs": [ + "How To Guides", + "Dataset Loading" + ] + }, + { + "objectID": "docs/dataset_loading.html#next-steps", + "href": "docs/dataset_loading.html#next-steps", + "title": "Dataset Loading", + "section": "Next steps", + "text": "Next steps\nNow that you know how to load datasets, you can learn more on how to load your specific dataset format into your target output format dataset formats docs.", + "crumbs": [ + "How To Guides", + "Dataset Loading" + ] + }, + { + "objectID": "docs/batch_vs_grad.html", + "href": "docs/batch_vs_grad.html", + "title": "Batch size vs Gradient accumulation", + "section": "", + "text": "Gradient accumulation means accumulating gradients over several mini-batches and updating the model weights afterward. When the samples in each batch are diverse, this technique doesn’t significantly impact learning.\nThis method allows for effective training with larger effective batch sizes without needing proportionally larger memory. Here’s why:\n\nMemory Consumption with Batch Size: The primary reason increasing the batch size impacts memory is due to the storage requirements for intermediate activations. When you forward propagate a batch through a network, you have to store the activations at each layer for each sample in the batch, because these activations are used during backpropagation to compute gradients. 
Therefore, larger batches mean more activations, leading to greater GPU memory consumption.\nGradient Accumulation: With gradient accumulation, you’re effectively simulating a larger batch size by accumulating gradients over several smaller batches (or micro-batches). However, at any given time, you’re only forward and backward propagating a micro-batch. This means you only store activations for the micro-batch, not the full accumulated batch. As a result, you can simulate the effect of a larger batch size without the memory cost of storing activations for a large batch.\n\nExample 1:\nMicro batch size: 3\nGradient accumulation steps: 2\nNumber of GPUs: 3\nTotal batch size = 3 * 2 * 3 = 18\n| GPU 1 | GPU 2 | GPU 3 |\n|----------------|----------------|----------------|\n| S1, S2, S3 | S4, S5, S6 | S7, S8, S9 |\n| e1, e2, e3 | e4, e5, e6 | e7, e8, e9 |\n|----------------|----------------|----------------|\n| → (accumulate) | → (accumulate) | → (accumulate) |\n|----------------|----------------|----------------|\n| S10, S11, S12 | S13, S14, S15 | S16, S17, S18 |\n| e10, e11, e12 | e13, e14, e15 | e16, e17, e18 |\n|----------------|----------------|----------------|\n| → (apply) | → (apply) | → (apply) |\n\nAccumulated gradient for the weight w1 after the second iteration (considering all GPUs):\nTotal gradient for w1 = e1 + e2 + e3 + e4 + e5 + e6 + e7 + e8 + e9 + e10 + e11 + e12 + e13 + e14 + e15 + e16 + e17 + e18\n\nWeight update for w1:\nw1_new = w1_old - learning rate x (Total gradient for w1 / 18)\nExample 2:\nMicro batch size: 2\nGradient accumulation steps: 1\nNumber of GPUs: 3\nTotal batch size = 2 * 1 * 3 = 6\n| GPU 1 | GPU 2 | GPU 3 |\n|-----------|-----------|-----------|\n| S1, S2 | S3, S4 | S5, S6 |\n| e1, e2 | e3, e4 | e5, e6 |\n|-----------|-----------|-----------|\n| → (apply) | → (apply) | → (apply) |\n\nAccumulated gradient for the weight w1 (considering all GPUs):\nTotal gradient for w1 = e1 + e2 + e3 + e4 + e5 + e6\n\nWeight update for w1:\nw1_new = w1_old - learning rate × (Total gradient for w1 / 6)", "crumbs": [ "Core Concepts", - "Multipack (Sample Packing)" + "Batch size vs Gradient accumulation" ] }, { - "objectID": "docs/multipack.html#visualization-of-multipack-with-flash-attention", - "href": "docs/multipack.html#visualization-of-multipack-with-flash-attention", - "title": "Multipack (Sample Packing)", + "objectID": "docs/faq.html", + "href": "docs/faq.html", + "title": "FAQ", "section": "", - "text": "Because Flash Attention simply drops the attention mask, we do not need to\nconstruct a 4d attention mask. 
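A minimal single-GPU sketch of the accumulation loop described above; the model, data, and loss are stand-ins. With a micro batch size of 3 and 2 accumulation steps, the weights are updated once per 6 samples without ever materializing a 6-sample batch.

# Minimal single-GPU sketch of gradient accumulation; model, data and loss are
# stand-ins. Weights are updated once per micro_batch_size * accumulation steps
# samples (here 3 * 2 = 6).
import torch

model = torch.nn.Linear(16, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
loss_fn = torch.nn.MSELoss()

micro_batch_size = 3
gradient_accumulation_steps = 2
micro_batches = [(torch.randn(micro_batch_size, 16), torch.randn(micro_batch_size, 1))
                 for _ in range(4)]

optimizer.zero_grad()
for step, (x, y) in enumerate(micro_batches, start=1):
    loss = loss_fn(model(x), y)
    # Divide so the accumulated gradient is the mean over the full effective batch.
    (loss / gradient_accumulation_steps).backward()
    if step % gradient_accumulation_steps == 0:
        optimizer.step()       # apply the accumulated gradient
        optimizer.zero_grad()  # start the next accumulation window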
We only need to concatenate the sequences into\na single batch and let flash attention know where each new sequence begins.\n4k context, bsz =4,\neach character represents 256 tokens\nX represents a padding token\n 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5\n[[ A A A A A A A A A A A ]\n B B B B B B ]\n C C C C C C C ]\n D D D D ]]\n\n[[ E E E E E E E E ]\n [ F F F F ]\n [ G G G ]\n [ H H H H ]]\n\n[[ I I I ]\n [ J J J ]\n [ K K K K K]\n [ L L L ]]\nafter padding to longest input in each step\n 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5\n[[ A A A A A A A A A A A ]\n B B B B B B X X X X X X ]\n C C C C C C C X X X X ]\n D D D D X X X X X X X ]]\n\n[[ E E E E E E E E ]\n [ F F F F X X X X ]\n [ G G G X X X X X ]\n [ H H H H X X X X ]]\n\n[[ I I I X X ]\n [ J J J X X ]\n [ K K K K K ]\n [ L L L X X ]]\nw packing ( note it’s the same effective number of tokens per step, but a true bsz of 1)\n 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5\n[[ A A A A A A A A A A A B B B B B\n B C C C C C C C D D D D E E E E\n E E E E F F F F F G G G H H H H\n I I I J J J J K K K K K L L L X ]]\ncu_seqlens:\n[[ 0, 11, 17, 24, 28, 36, 41 44, 48, 51, 55, 60, 64]]", + "text": "General\nQ: The trainer stopped and hasn’t progressed in several minutes.\n\nA: Usually an issue with the GPUs communicating with each other. See the NCCL doc\n\nQ: Exitcode -9\n\nA: This usually happens when you run out of system RAM.\n\nQ: Exitcode -7 while using deepspeed\n\nA: Try upgrading deepspeed w: pip install -U deepspeed\n\nQ: AttributeError: ‘DummyOptim’ object has no attribute ‘step’\nQ: ModuleNotFoundError: No module named ‘mpi4py’ using single GPU with deepspeed\n\nA: You may be using deepspeed with single gpu. Please remove the deepspeed: section in the yaml file or --deepspeed CLI flag.\n\nQ: The codes is stuck on saving preprocessed datasets.\n\nA: This is usually an issue with the GPU. This can be resolved through setting the os environment variable CUDA_VISIBLE_DEVICES=0. If you are on runpod, this is usually a pod issue. Starting a new pod should take care of it.\n\nQ: Received mismatch error on merge adapters / loading adapters between torch.Size of checkpoint and model.\n\nA: This is likely due to vocab size mismatch. By default, Axolotl expands the model’s embeddings if the tokenizer has more tokens than the model. Please use the axolotl merge-lora command to merge the adapters instead of using your own scripts.\n\n\nOn the other hand, if the model has more tokens than the tokenizer, Axolotl does not shrink the model’s embeddings unless shrink_embeddings: true is set in the config.\n\nQ: How to call Axolotl via custom python scripts?\n\nA: Since Axolotl is just Python, please see src/axolotl/cli/main.py on how each command is called.\n\nQ: How to know the value to use for fsdp_transformer_layer_cls_to_wrap?\n\nA: This is the class name of the transformer layer to wrap with FSDP. For example, for LlamaForCausalLM, the value is LlamaDecoderLayer. To find this for a specific model, check the model’s PreTrainedModel definition and look for _no_split_modules variable in the modeling_<model_name>.py file within transformers library.\n\nQ: ValueError: Asking to pad but the tokenizer does not have a padding token. Please select a token to use as pad_token\n\nA: This is because the tokenizer does not have a padding token. Please add a padding token to the tokenizer via:\n\n\nspecial_tokens:\n # str. 
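The cu_seqlens row in the packing illustration above is simply the running total of the packed sequence lengths (here in units of 256 tokens), which is how flash attention is told where each new sequence begins. A small sketch, with the final entry covering the trailing pad position:

# cu_seqlens is the cumulative sum of the packed lengths, starting at 0.
# Lengths below are read off the packed batch above (A..L); the last one
# absorbs the trailing pad token.
import itertools

packed_lens = [11, 6, 7, 4, 8, 5, 3, 4, 3, 4, 5, 4]
cu_seqlens = [0] + list(itertools.accumulate(packed_lens))
print(cu_seqlens)   # [0, 11, 17, 24, 28, 36, 41, 44, 48, 51, 55, 60, 64]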
If you're not sure, set to same as `eos_token`.\n pad_token: \"...\"\n\n\n\nChat templates\nQ: jinja2.exceptions.UndefinedError: 'dict object' has no attribute 'content' / 'role' / ____\n\nA: This means that the property mapping for the stated attribute does not exist when building chat_template prompt. For example, if no attribute 'content', please check you have added the correct mapping for content under message_property_mappings.\n\nQ: Empty template generated for turn ___\n\nA: The content is empty for that turn.\n\nQ: Could not find content start/end boundary for turn __\n\nA: The specific turn’s start/end could not be detected. Please ensure you have set the eos_token following your chat_template. Otherwise, this could be a chat_template which doesn’t use proper boundaries for each turn (like system). On the rare occurrence, make sure your content is not [[dummy_message]]. Please let us know about this.\n\nQ: Content end boundary is before start boundary for turn ___\n\nA: This is an edge case which should not occur. Please create an Issue if this happens.\n\nQ: Content end boundary is the same as start boundary for turn ___. This is likely an empty turn.\n\nA: This is likely an empty turn.\n\nQ: The EOS/EOT token is incorrectly being masked or not being masked.\n\nA: This is because of the mismatch between tokenizer.eos_token and EOS/EOT token in template. Please make sure to set eos_token under special_tokens to the same EOS/EOT token as in template.\n\nQ: “chat_template choice is tokenizer_default but tokenizer’s chat_template is null. Please add a chat_template in tokenizer config”\n\nA: This is because the tokenizer does not have a chat template. Please add a chat template in the tokenizer config. See chat_template for more details.", "crumbs": [ - "Core Concepts", - "Multipack (Sample Packing)" + "Troubleshooting", + "FAQ" ] }, { - "objectID": "docs/multipack.html#multipack-without-flash-attention", - "href": "docs/multipack.html#multipack-without-flash-attention", - "title": "Multipack (Sample Packing)", - "section": "Multipack without Flash Attention", - "text": "Multipack without Flash Attention\nMultipack can still be achieved without Flash attention, but with lower packing\nefficiency as we are not able to join multiple batches into a single batch due to\ncontext length limits without flash attention. We can use either Pytorch’s Scaled\nDot Product Attention implementation or native Pytorch attention implementation\nalong with 4d attention masks\nto pack sequences together and avoid cross attention.", + "objectID": "docs/debugging.html", + "href": "docs/debugging.html", + "title": "Debugging", + "section": "", + "text": "This document provides some tips and tricks for debugging Axolotl. It also provides an example configuration for debugging with VSCode. 
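Relating this to the padding-token question in the FAQ above: outside of Axolotl's special_tokens config, the same situation can be checked and patched directly with transformers. The model name below is just the small example model used elsewhere in these docs.

# Stand-alone illustration of the missing-pad-token error from the FAQ above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")

if tokenizer.pad_token is None:
    # Simplest fix, mirroring the FAQ advice: reuse the EOS token for padding.
    tokenizer.pad_token = tokenizer.eos_token

print(tokenizer.pad_token, tokenizer.pad_token_id)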
A good debugging setup is essential to understanding how Axolotl code works behind the scenes.", "crumbs": [ - "Core Concepts", - "Multipack (Sample Packing)" + "Troubleshooting", + "Debugging" + ] + }, + { + "objectID": "docs/debugging.html#table-of-contents", + "href": "docs/debugging.html#table-of-contents", + "title": "Debugging", + "section": "Table of Contents", + "text": "Table of Contents\n\nGeneral Tips\nDebugging with VSCode\n\nBackground\nConfiguration\nCustomizing your debugger\nVideo Tutorial\n\nDebugging With Docker\n\nSetup\nAttach To Container\nVideo - Attaching To Docker On Remote Host", + "crumbs": [ + "Troubleshooting", + "Debugging" + ] + }, + { + "objectID": "docs/debugging.html#general-tips", + "href": "docs/debugging.html#general-tips", + "title": "Debugging", + "section": "General Tips", + "text": "General Tips\nWhile debugging it’s helpful to simplify your test scenario as much as possible. Here are some tips for doing so:\n\n[!Important]\nAll of these tips are incorporated into the example configuration for debugging with VSCode below.\n\n\nMake sure you are using the latest version of axolotl: This project changes often and bugs get fixed fast. Check your git branch and make sure you have pulled the latest changes from main.\nEliminate concurrency: Restrict the number of processes to 1 for both training and data preprocessing:\n\nSet CUDA_VISIBLE_DEVICES to a single GPU, ex: export CUDA_VISIBLE_DEVICES=0.\nSet dataset_processes: 1 in your axolotl config or run the training command with --dataset_processes=1.\n\nUse a small dataset: Construct or use a small dataset from HF Hub. When using a small dataset, you will often have to make sure sample_packing: False and eval_sample_packing: False to avoid errors. If you are in a pinch and don’t have time to construct a small dataset but want to use from the HF Hub, you can shard the data (this will still tokenize the entire dataset, but will only use a fraction of the data for training. For example, to shard the dataset into 20 pieces, add the following to your axolotl config):\ndatasets:\n ...\n shards: 20\nUse a small model: A good example of a small model is TinyLlama/TinyLlama-1.1B-Chat-v1.0.\nMinimize iteration time: Make sure the training loop finishes as fast as possible, with these settings.\n\nmicro_batch_size: 1\nmax_steps: 1\nval_set_size: 0\n\nClear Caches: Axolotl caches certain steps and so does the underlying HuggingFace trainer. You may want to clear some of these caches when debugging.\n\nData preprocessing: When debugging data preprocessing, which includes prompt template formation, you may want to delete the directory set in dataset_prepared_path: in your axolotl config. If you didn’t set this value, the default is last_run_prepared.\nHF Hub: If you are debugging data preprocessing, you should clear the relevant HF cache HuggingFace cache, by deleting the appropriate ~/.cache/huggingface/datasets/... folder(s).\nThe recommended approach is to redirect all outputs and caches to a temporary folder and delete selected subfolders before each run. This is demonstrated in the example configuration below.", + "crumbs": [ + "Troubleshooting", + "Debugging" + ] + }, + { + "objectID": "docs/debugging.html#debugging-with-vscode", + "href": "docs/debugging.html#debugging-with-vscode", + "title": "Debugging", + "section": "Debugging with VSCode", + "text": "Debugging with VSCode\n\nBackground\nThe below example shows how to configure VSCode to debug data preprocessing of the chat_template format. 
This is the format used when you have the following in your axolotl config:\ndatasets:\n - path: <path to your chat_template formatted dataset> # example on HF Hub: fozziethebeat/alpaca_messages_2k_test\n type: chat_template\n\n[!Important]\nIf you are already familiar with advanced VSCode debugging, you can skip the below explanation and look at the files .vscode/launch.json and .vscode/tasks.json for an example configuration.\n\n\n[!Tip]\nIf you prefer to watch a video, rather than read, you can skip to the video tutorial below (but doing both is recommended).\n\n\n\nSetup\nMake sure you have an editable install of Axolotl, which ensures that changes you make to the code are reflected at runtime. Run the following commands from the root of this project:\npip3 install packaging\npip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'\n\nRemote Hosts\nIf you developing on a remote host, you can easily use VSCode to debug remotely. To do so, you will need to follow this remote - SSH guide. You can also see the video below on Docker and Remote SSH debugging.\n\n\n\nConfiguration\nThe easiest way to get started is to modify the .vscode/launch.json file in this project. This is just an example configuration, so you may need to modify or copy it to suit your needs.\nFor example, to mimic the command cd devtools && CUDA_VISIBLE_DEVICES=0 accelerate launch -m axolotl.cli.train dev_chat_template.yml, you would use the below configuration1. Note that we add additional flags that override the axolotl config and incorporate the tips above (see the comments). We also set the working directory to devtools and set the env variable HF_HOME to a temporary folder that is later partially deleted. This is because we want to delete the HF dataset cache before each run in order to ensure that the data preprocessing code is run from scratch.\n// .vscode/launch.json\n{\n \"version\": \"0.2.0\",\n \"configurations\": [\n {\n \"name\": \"Debug axolotl prompt - chat_template\",\n \"type\": \"python\",\n \"module\": \"accelerate.commands.launch\",\n \"request\": \"launch\",\n \"args\": [\n \"-m\", \"axolotl.cli.train\", \"dev_chat_template.yml\",\n // The flags below simplify debugging by overriding the axolotl config\n // with the debugging tips above. 
Modify as needed.\n \"--dataset_processes=1\", // limits data preprocessing to one process\n \"--max_steps=1\", // limits training to just one step\n \"--batch_size=1\", // minimizes batch size\n \"--micro_batch_size=1\", // minimizes batch size\n \"--val_set_size=0\", // disables validation\n \"--sample_packing=False\", // disables sample packing which is necessary for small datasets\n \"--eval_sample_packing=False\",// disables sample packing on eval set\n \"--dataset_prepared_path=temp_debug/axolotl_outputs/data\", // send data outputs to a temp folder\n \"--output_dir=temp_debug/axolotl_outputs/model\" // send model outputs to a temp folder\n ],\n \"console\": \"integratedTerminal\", // show output in the integrated terminal\n \"cwd\": \"${workspaceFolder}/devtools\", // set working directory to devtools from the root of the project\n \"justMyCode\": true, // step through only axolotl code\n \"env\": {\"CUDA_VISIBLE_DEVICES\": \"0\", // Since we aren't doing distributed training, we need to limit to one GPU\n \"HF_HOME\": \"${workspaceFolder}/devtools/temp_debug/.hf-cache\"}, // send HF cache to a temp folder\n \"preLaunchTask\": \"cleanup-for-dataprep\", // delete temp folders (see below)\n }\n ]\n}\nAdditional notes about this configuration:\n\nThe argument justMyCode is set to true such that you step through only the axolotl code. If you want to step into dependencies, set this to false.\nThe preLaunchTask: cleanup-for-dataprep is defined in .vscode/tasks.json and is used to delete the following folders before debugging, which is essential to ensure that the data pre-processing code is run from scratch:\n\n./devtools/temp_debug/axolotl_outputs\n./devtools/temp_debug/.hf-cache/datasets\n\n\n\n[!Tip]\nYou may not want to delete these folders. For example, if you are debugging model training instead of data pre-processing, you may NOT want to delete the cache or output folders. You may also need to add additional tasks to the tasks.json file depending on your use case.\n\nBelow is the ./vscode/tasks.json file that defines the cleanup-for-dataprep task. This task is run before each debugging session when you use the above configuration. Note how there are two tasks that delete the two folders mentioned above. The third task cleanup-for-dataprep is a composite task that combines the two tasks. A composite task is necessary because VSCode does not allow you to specify multiple tasks in the preLaunchTask argument of the launch.json file.\n// .vscode/tasks.json\n// this file is used by launch.json\n{\n \"version\": \"2.0.0\",\n \"tasks\": [\n // this task changes into the devtools directory and deletes the temp_debug/axolotl_outputs folder\n {\n \"label\": \"delete-outputs\",\n \"type\": \"shell\",\n \"command\": \"rm -rf temp_debug/axolotl_outputs\",\n \"options\":{ \"cwd\": \"${workspaceFolder}/devtools\"},\n \"problemMatcher\": []\n },\n // this task changes into the devtools directory and deletes the `temp_debug/.hf-cache/datasets` folder\n {\n \"label\": \"delete-temp-hf-dataset-cache\",\n \"type\": \"shell\",\n \"command\": \"rm -rf temp_debug/.hf-cache/datasets\",\n \"options\":{ \"cwd\": \"${workspaceFolder}/devtools\"},\n \"problemMatcher\": []\n },\n // this task combines the two tasks above\n {\n \"label\": \"cleanup-for-dataprep\",\n \"dependsOn\": [\"delete-outputs\", \"delete-temp-hf-dataset-cache\"],\n }\n ]\n}\n\n\nCustomizing your debugger\nYour debugging use case may differ from the example above. 
The easiest thing to do is to put your own axolotl config in the devtools folder and modify the launch.json file to use your config. You may also want to modify the preLaunchTask to delete different folders or not delete anything at all.\n\n\nVideo Tutorial\nThe following video tutorial walks through the above configuration and demonstrates how to debug with VSCode, (click the image below to watch):\n\n\n\nHamel Husain’s tutorial: Debugging Axolotl w/VSCode", + "crumbs": [ + "Troubleshooting", + "Debugging" + ] + }, + { + "objectID": "docs/debugging.html#debugging-with-docker", + "href": "docs/debugging.html#debugging-with-docker", + "title": "Debugging", + "section": "Debugging With Docker", + "text": "Debugging With Docker\nUsing official Axolotl Docker images is a great way to debug your code, and is a very popular way to use Axolotl. Attaching VSCode to Docker takes a few more steps.\n\nSetup\nOn the host that is running axolotl (ex: if you are using a remote host), clone the axolotl repo and change your current directory to the root:\ngit clone https://github.com/axolotl-ai-cloud/axolotl\ncd axolotl\n\n[!Tip]\nIf you already have axolotl cloned on your host, make sure you have the latest changes and change into the root of the project.\n\nNext, run the desired docker image and mount the current directory. Below is a docker command you can run to do this:2\ndocker run --privileged --gpus '\"all\"' --shm-size 10g --rm -it --name axolotl --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 --mount type=bind,src=\"${PWD}\",target=/workspace/axolotl -v ${HOME}/.cache/huggingface:/root/.cache/huggingface axolotlai/axolotl:main-py3.10-cu118-2.0.1\n\n[!Tip]\nTo understand which containers are available, see the Docker section of the README and the DockerHub repo. For details of how the Docker containers are built, see axolotl’s Docker CI builds.\n\nYou will now be in the container. Next, perform an editable install of Axolotl:\npip3 install packaging\npip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'\n\n\nAttach To Container\nNext, if you are using a remote host, Remote into this host with VSCode. If you are using a local host, you can skip this step.\nNext, select Dev Containers: Attach to Running Container... using the command palette (CMD + SHIFT + P) in VSCode. You will be prompted to select a container to attach to. Select the container you just created. You will now be in the container with a working directory that is at the root of the project. Any changes you make to the code will be reflected both in the container and on the host.\nNow you are ready to debug as described above (see Debugging with VSCode).\n\n\nVideo - Attaching To Docker On Remote Host\nHere is a short video that demonstrates how to attach to a Docker container on a remote host:\n\n\n\nHamel Husain’s tutorial: Debugging Axolotl Part 2: Attaching to Docker on a Remote Host", + "crumbs": [ + "Troubleshooting", + "Debugging" + ] + }, + { + "objectID": "docs/debugging.html#footnotes", + "href": "docs/debugging.html#footnotes", + "title": "Debugging", + "section": "Footnotes", + "text": "Footnotes\n\n\nThe config actually mimics the command CUDA_VISIBLE_DEVICES=0 python -m accelerate.commands.launch -m axolotl.cli.train devtools/chat_template.yml, but this is the same thing.↩︎\nMany of the below flags are recommended best practices by Nvidia when using nvidia-container-toolkit. 
You can read more about these flags here.↩︎", + "crumbs": [ + "Troubleshooting", + "Debugging" + ] + }, + { + "objectID": "docs/lr_groups.html", + "href": "docs/lr_groups.html", + "title": "Learning Rate Groups", + "section": "", + "text": "Inspired by LoRA+, Axolotl allows practitioners to specify separate learning rates for each module or groups of\nmodules in a model.", + "crumbs": [ + "How To Guides", + "Learning Rate Groups" + ] + }, + { + "objectID": "docs/lr_groups.html#background", + "href": "docs/lr_groups.html#background", + "title": "Learning Rate Groups", + "section": "", + "text": "Inspired by LoRA+, Axolotl allows practitioners to specify separate learning rates for each module or groups of\nmodules in a model.", + "crumbs": [ + "How To Guides", + "Learning Rate Groups" + ] + }, + { + "objectID": "docs/lr_groups.html#example", + "href": "docs/lr_groups.html#example", + "title": "Learning Rate Groups", + "section": "Example", + "text": "Example\nlr_groups:\n - name: o_proj\n modules:\n - self_attn.o_proj.weight\n lr: 1e-6\n - name: q_proj\n modules:\n - model.layers.2.self_attn.q_proj.weight\n lr: 1e-5\n\nlearning_rate: 2e-5\nIn this example, we have a default learning rate of 2e-5 across the entire model, but we have a separate learning rate\nof 1e-6 for all the self attention o_proj modules across all layers, and a learning are of 1e-5 to the 3rd layer’s\nself attention q_proj module.", + "crumbs": [ + "How To Guides", + "Learning Rate Groups" + ] + }, + { + "objectID": "TODO.html", + "href": "TODO.html", + "title": "todo list", + "section": "", + "text": "[] Validation of parameters for combinations that won’t work\n\n\n\n\nFSDP offload and gradient_checkpointing - https://github.com/pytorch/pytorch/issues/82203\nadamw_bnb_8bit doesn’t play well with FSDP offload" + }, + { + "objectID": "TODO.html#things-that-are-known-not-to-work", + "href": "TODO.html#things-that-are-known-not-to-work", + "title": "todo list", + "section": "", + "text": "FSDP offload and gradient_checkpointing - https://github.com/pytorch/pytorch/issues/82203\nadamw_bnb_8bit doesn’t play well with FSDP offload" + }, + { + "objectID": "src/axolotl/integrations/LICENSE.html", + "href": "src/axolotl/integrations/LICENSE.html", + "title": "Axolotl", + "section": "", + "text": "AXOLOTL COMMUNITY LICENSE AGREEMENT\nThis Axolotl Community License Agreement (“Agreement”) is entered into by and between Axolotl AI Corp. (“Axolotl”) and\nany individual or entity (“Licensee”) who wishes to use the Software (as defined below) in accordance with the terms\nand conditions set forth in this Agreement.\n\nDefinitions\n1.1 “Licensee” refers to any individual or entity who has obtained a copy of the Software under this Agreement.\n1.2 “Plugin Integration” means independent integration software modules which may or may not be offered by Axolotl,\nwhich may be licensed separately by their respective authors and/or licensors.\n1.3 “Software” refers to the specific sub-directory of the Axolotl, Inc. 
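Circling back to the lr_groups example above: conceptually, per-module learning rates end up as separate optimizer parameter groups. The sketch below shows the idea with plain PyTorch; it is not Axolotl's actual implementation, the model is a small placeholder, and matching modules by substring is a simplification.

# Conceptual sketch only: map per-module learning rates onto optimizer param groups.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")

default_lr = 2e-5
lr_groups = [
    {"match": "self_attn.o_proj.weight", "lr": 1e-6},                 # every layer's o_proj
    {"match": "model.layers.2.self_attn.q_proj.weight", "lr": 1e-5},  # 3rd layer's q_proj
]

def lr_for(name: str) -> float:
    for group in lr_groups:        # first matching group wins
        if group["match"] in name:
            return group["lr"]
    return default_lr              # everything else keeps the default rate

buckets = {}
for name, param in model.named_parameters():
    buckets.setdefault(lr_for(name), []).append(param)

optimizer = torch.optim.AdamW(
    [{"params": params, "lr": lr} for lr, params in buckets.items()]
)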
software located at\nhttps://github.com/axolotl-ai-cloud/axolotl/tree/main/src/axolotl/integrations and its subdirectories which\npermits Plugin Integrations to integrate with the Axolotl service.\nGrant of License\n2.1 Axolotl hereby grants Licensee a worldwide, non-exclusive, royalty-free, license to use, copy, modify, merge,\npublish, distribute, sublicense, and/or otherwise exploit the Software, subject to the following conditions:\n- Licensee must comply with all the terms and conditions of this Agreement.\n- Licensee must include the original copyright notice and disclaimer of warranty in all copies or substantial\nportions of the Software.\n2.2 Licensee may use the Software for any lawful purpose, except as restricted in Section 3.\nRestrictions\n3.1 Licensee shall not use the Software for any activity that constitutes a commercial activity of offering for\nfree or for sale any services, platform, or equivalent to third parties for the purposes of allowing such\nthird parties to fine-tune artificial intelligence models.\n3.2 Licensee shall not:\n- Use the Software for any illegal or unauthorized purpose.\n- Reverse engineer, decompile, or disassemble the Software.\n- Remove or modify any copyright, trademark, or other proprietary notices contained in the Software.\n- Use the Software in a way that could damage, disable, overburden, or impair the functionality of the\nSoftware or interfere with any third-party use of the Software.\n3.3 Axolotl reserves the right to restrict certain Plugin Integrations for use with the Software. To the extent Licensee integrates a permitted, applicable Plugin Integration with the Software, Licensee shall comply with any additional terms and conditions imposed by the licensors of such Plugin Integration for use of such Plugin Integrations. Licensee shall contact Axolotl if it has questions about whether its use of the Software falls beyond the scope of this Agreement.\nIntellectual Property Rights\n4.1 Axolotl and its contributors retain all intellectual property rights in and to the Software. Licensee\nacknowledges that this Agreement does not transfer any ownership rights or intellectual property rights to\nLicensee.\nDisclaimer of Warranty\n5.1 THE SOFTWARE IS PROVIDED “AS IS,” WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\nTO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. IN NO EVENT SHALL\nTHE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF\nCONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\nTermination\n6.1 Axolotl may terminate this Agreement at any time if Licensee fails to comply with any of the terms and\nconditions set forth herein. Upon termination, Licensee shall cease all use of the Software and destroy any\ncopies in its possession.\nGoverning Law\n7.1 This Agreement shall be governed by and construed in accordance with the laws of the State of California,\nwithout regards to conflicts of laws provisions thereof.\nEntire Agreement\n8.1 This Agreement constitutes the entire agreement between Axolotl and Licensee with respect to the subject matter\nhereof and supersedes all prior or contemporaneous understandings or agreements between the parties concerning\nthe Software, whether written or oral. 
Axolotl may update the terms of this Agreement from time to time, and\nLicensee’s continued use of the Software after any such updates shall constitute acceptance of updated terms\non a go-forward basis. Axolotl will use commercially reasonable efforts to provide Licensee notice of any\nmaterial updates. By using the Software, Licensee acknowledges that it has read, understood, and agrees to be\nbound by the terms and conditions of this Agreement.\n\nThis Agreement was last updated on August 23, 2024." + }, + { + "objectID": "index.html", + "href": "index.html", + "title": "Axolotl", + "section": "", + "text": "Axolotl is a tool designed to streamline post-training for various AI models.\nPost-training refers to any modifications or additional training performed on\npre-trained models - including full model fine-tuning, parameter-efficient tuning (like\nLoRA and QLoRA), supervised fine-tuning (SFT), instruction tuning, and alignment\ntechniques. With support for multiple model architectures and training configurations,\nAxolotl makes it easy to get started with these techniques.\nAxolotl is designed to work with YAML config files that contain everything you need to\npreprocess a dataset, train or fine-tune a model, run model inference or evaluation,\nand much more.\nFeatures:", + "crumbs": [ + "Home" + ] + }, + { + "objectID": "index.html#quick-start", + "href": "index.html#quick-start", + "title": "Axolotl", + "section": "🚀 Quick Start", + "text": "🚀 Quick Start\nRequirements:\n\nNVIDIA GPU (Ampere or newer for bf16 and Flash Attention) or AMD GPU\nPython 3.11\nPyTorch ≥2.4.1\n\n\nInstallation\npip3 install -U packaging==23.2 setuptools==75.8.0 wheel ninja\npip3 install --no-build-isolation axolotl[flash-attn,deepspeed]\n\n# Download example axolotl configs, deepspeed configs\naxolotl fetch examples\naxolotl fetch deepspeed_configs # OPTIONAL\nOther installation approaches are described here.\n\n\nYour First Fine-tune\n# Fetch axolotl examples\naxolotl fetch examples\n\n# Or, specify a custom path\naxolotl fetch examples --dest path/to/folder\n\n# Train a model using LoRA\naxolotl train examples/llama-3/lora-1b.yml\nThat’s it! 
Check out our Getting Started Guide for a more detailed walkthrough.", + "crumbs": [ + "Home" + ] + }, + { + "objectID": "index.html#key-features", + "href": "index.html#key-features", + "title": "Axolotl", + "section": "✨ Key Features", + "text": "✨ Key Features\n\nMultiple Model Support: Train various models like LLaMA, Mistral, Mixtral, Pythia, and more\nTraining Methods: Full fine-tuning, LoRA, QLoRA, and more\nEasy Configuration: Simple YAML files to control your training setup\nPerformance Optimizations: Flash Attention, xformers, multi-GPU training\nFlexible Dataset Handling: Use various formats and custom datasets\nCloud Ready: Run on cloud platforms or local hardware", + "crumbs": [ + "Home" + ] + }, + { + "objectID": "index.html#documentation", + "href": "index.html#documentation", + "title": "Axolotl", + "section": "📚 Documentation", + "text": "📚 Documentation\n\nInstallation Options - Detailed setup instructions for different environments\nConfiguration Guide - Full configuration options and examples\nDataset Guide - Supported formats and how to use them\nMulti-GPU Training\nMulti-Node Training\nMultipacking\nAPI Reference - Auto-generated code documentation\nFAQ - Frequently asked questions", + "crumbs": [ + "Home" + ] + }, + { + "objectID": "index.html#getting-help", + "href": "index.html#getting-help", + "title": "Axolotl", + "section": "🤝 Getting Help", + "text": "🤝 Getting Help\n\nJoin our Discord community for support\nCheck out our Examples directory\nRead our Debugging Guide\nNeed dedicated support? Please contact ✉️wing@axolotl.ai for options", + "crumbs": [ + "Home" + ] + }, + { + "objectID": "index.html#contributing", + "href": "index.html#contributing", + "title": "Axolotl", + "section": "🌟 Contributing", + "text": "🌟 Contributing\nContributions are welcome! Please see our Contributing Guide for details.", + "crumbs": [ + "Home" + ] + }, + { + "objectID": "index.html#supported-models", + "href": "index.html#supported-models", + "title": "Axolotl", + "section": "Supported Models", + "text": "Supported Models\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nfp16/fp32\nlora\nqlora\ngptq\ngptq w/flash attn\nflash attn\nxformers attn\n\n\n\n\nllama\n✅\n✅\n✅\n✅\n✅\n✅\n✅\n\n\nMistral\n✅\n✅\n✅\n✅\n✅\n✅\n✅\n\n\nMixtral-MoE\n✅\n✅\n✅\n❓\n❓\n❓\n❓\n\n\nMixtral8X22\n✅\n✅\n✅\n❓\n❓\n❓\n❓\n\n\nPythia\n✅\n✅\n✅\n❌\n❌\n❌\n❓\n\n\ncerebras\n✅\n✅\n✅\n❌\n❌\n❌\n❓\n\n\nbtlm\n✅\n✅\n✅\n❌\n❌\n❌\n❓\n\n\nmpt\n✅\n❌\n❓\n❌\n❌\n❌\n❓\n\n\nfalcon\n✅\n✅\n✅\n❌\n❌\n❌\n❓\n\n\ngpt-j\n✅\n✅\n✅\n❌\n❌\n❓\n❓\n\n\nXGen\n✅\n❓\n✅\n❓\n❓\n❓\n✅\n\n\nphi\n✅\n✅\n✅\n❓\n❓\n❓\n❓\n\n\nRWKV\n✅\n❓\n❓\n❓\n❓\n❓\n❓\n\n\nQwen\n✅\n✅\n✅\n❓\n❓\n❓\n❓\n\n\nGemma\n✅\n✅\n✅\n❓\n❓\n✅\n❓\n\n\nJamba\n✅\n✅\n✅\n❓\n❓\n✅\n❓\n\n\n\n✅: supported\n❌: not supported\n❓: untested", + "crumbs": [ + "Home" + ] + }, + { + "objectID": "index.html#sponsors", + "href": "index.html#sponsors", + "title": "Axolotl", + "section": "❤️ Sponsors", + "text": "❤️ Sponsors\nThank you to our sponsors who help make Axolotl possible:\n\nModal - Modal lets you run\njobs in the cloud, by just writing a few lines of Python. Customers use Modal to deploy Gen AI models at large scale,\nfine-tune large language models, run protein folding simulations, and much more.\n\nInterested in sponsoring? 
Contact us at wing@axolotl.ai", + "crumbs": [ + "Home" + ] + }, + { + "objectID": "index.html#license", + "href": "index.html#license", + "title": "Axolotl", + "section": "📜 License", + "text": "📜 License\nThis project is licensed under the Apache 2.0 License - see the LICENSE file for details.", + "crumbs": [ + "Home" + ] + }, + { + "objectID": "src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html", + "href": "src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html", + "title": "Axolotl", + "section": "", + "text": "Acknowledgements\nPortions of this Cut Cross Entropy Software may utilize the following copyrighted\nmaterial, the use of which is hereby acknowledged.\n\nPyTorch\nFrom PyTorch:\n\nCopyright (c) 2016- Facebook, Inc (Adam Paszke)\nCopyright (c) 2014- Facebook, Inc (Soumith Chintala)\nCopyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\nCopyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\nCopyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\nCopyright (c) 2011-2013 NYU (Clement Farabet)\nCopyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\nCopyright (c) 2006 Idiap Research Institute (Samy Bengio)\nCopyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n\nFrom Caffe2:\n\nCopyright (c) 2016-present, Facebook Inc. All rights reserved.\n\nAll contributions by Facebook:\nCopyright (c) 2016 Facebook Inc.\n\nAll contributions by Google:\nCopyright (c) 2015 Google Inc.\nAll rights reserved.\n\nAll contributions by Yangqing Jia:\nCopyright (c) 2015 Yangqing Jia\nAll rights reserved.\n\nAll contributions by Kakao Brain:\nCopyright 2019-2020 Kakao Brain\n\nAll contributions by Cruise LLC:\nCopyright (c) 2022 Cruise LLC.\nAll rights reserved.\n\nAll contributions by Arm:\nCopyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates\n\nAll contributions from Caffe:\nCopyright(c) 2013, 2014, 2015, the respective contributors\nAll rights reserved.\n\nAll other contributions:\nCopyright(c) 2015, 2016 the respective contributors\nAll rights reserved.\n\nCaffe2 uses a copyright model similar to Caffe: each contributor holds\ncopyright over their contributions to Caffe2. The project versioning records\nall such contribution and copyright details. If a contributor wants to further\nmark their specific copyright on a particular contribution, they should\nindicate their copyright solely in the commit message of the change when it is\ncommitted.\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\n3. 
Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America\nand IDIAP Research Institute nor the names of its contributors may be\nused to endorse or promote products derived from this software without\nspecific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\nTriton\n/*\n* Copyright 2018-2020 Philippe Tillet\n* Copyright 2020-2022 OpenAI\n*\n* Permission is hereby granted, free of charge, to any person obtaining\n* a copy of this software and associated documentation files\n* (the \"Software\"), to deal in the Software without restriction,\n* including without limitation the rights to use, copy, modify, merge,\n* publish, distribute, sublicense, and/or sell copies of the Software,\n* and to permit persons to whom the Software is furnished to do so,\n* subject to the following conditions:\n*\n* The above copyright notice and this permission notice shall be\n* included in all copies or substantial portions of the Software.\n*\n* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n*/\nTransformers\nCopyright 2018- The Hugging Face team. All rights reserved.\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License." 
+ }, + { + "objectID": "FAQS.html", + "href": "FAQS.html", + "title": "FAQs", + "section": "", + "text": "FAQs\n\nCan you train StableLM with this? Yes, but only with a single GPU atm. Multi GPU support is coming soon! Just waiting on this PR\nWill this work with Deepspeed? That’s still a WIP, but setting export ACCELERATE_USE_DEEPSPEED=true should work in some cases\nError invalid argument at line 359 in file /workspace/bitsandbytes/csrc/pythonInterface.c\n/arrow/cpp/src/arrow/filesystem/s3fs.cc:2598: arrow::fs::FinalizeS3 was not called even though S3 was initialized.\nThis could lead to a segmentation fault at exit. Try reinstalling bitsandbytes and transformers from source." + }, + { + "objectID": "docs/multi-node.html", + "href": "docs/multi-node.html", + "title": "Multi Node", + "section": "", + "text": "The below are three ways to train multi-node in Axolotl.", + "crumbs": [ + "Deployments", + "Multi Node" + ] + }, + { + "objectID": "docs/multi-node.html#accelerate", + "href": "docs/multi-node.html#accelerate", + "title": "Multi Node", + "section": "Accelerate", + "text": "Accelerate\nYou will need to create a configuration for accelerate, either by using accelerate config and follow the instructions or you can use one of the preset below:\n~/.cache/huggingface/accelerate/default_config.yaml\ncompute_environment: LOCAL_MACHINE\ndebug: false\ndistributed_type: FSDP\ndowncast_bf16: 'no'\nmachine_rank: 0 # Set to 0 for the main machine, increment by one for other machines\nmain_process_ip: 10.0.0.4 # Set to main machine's IP\nmain_process_port: 5000\nmain_training_function: main\nmixed_precision: bf16\nnum_machines: 2 # Change to the number of machines\nnum_processes: 4 # That's the total number of GPUs, (for example: if you have 2 machines with 4 GPU, put 8)\nrdzv_backend: static\nsame_network: true\ntpu_env: []\ntpu_use_cluster: false\ntpu_use_sudo: false\nuse_cpu: false\nConfigure your model to use FSDP in the Axolotl yaml. 
For example:\nfsdp:\n - full_shard\n - auto_wrap\nfsdp_config:\n fsdp_offload_params: true\n fsdp_state_dict_type: FULL_STATE_DICT\n fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer\nAll that's left is to launch with accelerate on each machine as you normally would; the processes will start once accelerate has been launched on every machine.", + "crumbs": [ + "Deployments", + "Multi Node" + ] + }, + { + "objectID": "docs/multi-node.html#raytrain", + "href": "docs/multi-node.html#raytrain", + "title": "Multi Node", + "section": "Ray Train", + "text": "Ray Train\nPlease see the Ray Train doc here.", + "crumbs": [ + "Deployments", + "Multi Node" + ] + }, + { + "objectID": "docs/multi-node.html#torchrun", + "href": "docs/multi-node.html#torchrun", + "title": "Multi Node", + "section": "Torchrun", + "text": "Torchrun\nIf you are using InfiniBand, we recommend torchrun to utilize the full bandwidth.\nSet the following environment variables (change the buffer size/socket name depending on your system):\nexport NCCL_IB_DISABLE=0\nexport NCCL_SOCKET_IFNAME=\"eth0,en,eth,em,bond\"\nexport NCCL_BUFFSIZE=2097152\nRun the following on each node:\ntorchrun --nnodes $num_nodes --nproc_per_node $gpu_per_node --rdzv_id $rdzv_id --rdzv_backend c10d --rdzv_endpoint \"$head_node_ip:$head_node_port\" -m axolotl.cli.train config.yaml\nPlease make sure to substitute the placeholder variables.\n\nnum_nodes: Number of nodes (containing GPUs)\ngpu_per_node: Number of GPUs per node\nhead_node_ip: IP of the head node (make sure other machines can connect to this)\nhead_node_port: Port of the head node (make sure other machines can connect to this. Default: 29400)\nrdzv_id: A unique job ID that is used by the job across nodes.\n\n\n\n\n\n\n\nNote\n\n\n\nYou need to call axolotl.cli.train instead of axolotl train, as the latter calls accelerate under the hood.\n\n\nMore info on the available configs can be found in the PyTorch docs here", + "crumbs": [ + "Deployments", + "Multi Node" ] }, { @@ -1588,293 +1823,102 @@ ] }, { - "objectID": "docs/multi-node.html", - "href": "docs/multi-node.html", - "title": "Multi Node", + "objectID": "docs/multipack.html", + "href": "docs/multipack.html", + "title": "Multipack (Sample Packing)", "section": "", - "text": "The below are three ways to train multi-node in Axolotl.", - "crumbs": [ - "Deployments", - "Multi Node" - ] - }, - { - "objectID": "docs/multi-node.html#accelerate", - "href": "docs/multi-node.html#accelerate", - "title": "Multi Node", - "section": "Accelerate", - "text": "Accelerate\nYou will need to create a configuration for accelerate, either by using accelerate config and follow the instructions or you can use one of the preset below:\n~/.cache/huggingface/accelerate/default_config.yaml\ncompute_environment: LOCAL_MACHINE\ndebug: false\ndistributed_type: FSDP\ndowncast_bf16: 'no'\nmachine_rank: 0 # Set to 0 for the main machine, increment by one for other machines\nmain_process_ip: 10.0.0.4 # Set to main machine's IP\nmain_process_port: 5000\nmain_training_function: main\nmixed_precision: bf16\nnum_machines: 2 # Change to the number of machines\nnum_processes: 4 # That's the total number of GPUs, (for example: if you have 2 machines with 4 GPU, put 8)\nrdzv_backend: static\nsame_network: true\ntpu_env: []\ntpu_use_cluster: false\ntpu_use_sudo: false\nuse_cpu: false\nConfigure your model to use FSDP in the Axolotl yaml.
For example:\nfsdp:\n - full_shard\n - auto_wrap\nfsdp_config:\n fsdp_offload_params: true\n fsdp_state_dict_type: FULL_STATE_DICT\n fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer\nAll you have to do now is launch using accelerate as you would usually do on each machine and voila, the processes will start once you have launched accelerate on every machine.", - "crumbs": [ - "Deployments", - "Multi Node" - ] - }, - { - "objectID": "docs/multi-node.html#raytrain", - "href": "docs/multi-node.html#raytrain", - "title": "Multi Node", - "section": "Raytrain", - "text": "Raytrain\nPlease see ray train doc here.", - "crumbs": [ - "Deployments", - "Multi Node" - ] - }, - { - "objectID": "docs/multi-node.html#torchrun", - "href": "docs/multi-node.html#torchrun", - "title": "Multi Node", - "section": "Torchrun", - "text": "Torchrun\nIf you are using Infiniband, we recommend torchrun to utilize the full bandwidth.\nSet the following env (change buffersize/socketname depending on your system):\nexport NCCL_IB_DISABLE=0\nexport NCCL_SOCKET_IFNAME=\"eth0,en,eth,em,bond\"\nexport NCCL_BUFFSIZE=2097152\nRun the following on each node:\ntorchrun --nnodes $num_nodes --nproc_per_node $gpu_per_node --rdzv_id $rdzv_id --rdzv_backend c10d --rdzv_endpoint \"$head_node_ip:$head_node_port\" -m axolotl.cli.train config.yaml\nPlease make sure to substitute the placeholder variables.\n\nnum_nodes: Number of nodes (containing GPUs)\ngpu_per_node: Number of gpus per node\nhead_node_ip: IP of the head node (make sure other machines can connect to this)\nhead_node_port: Port of the head node (make sure other machines can connect to this. Default 29400)\nrdzv_id: A unique job ID that is used by the job across nodes.\n\n\n\n\n\n\n\nNote\n\n\n\nYou need to call axolotl.cli.train instead of axolotl train as the latter calls accelerate under the hood\n\n\nMore info on the available configs can be found on the Pytorch docs here", - "crumbs": [ - "Deployments", - "Multi Node" - ] - }, - { - "objectID": "FAQS.html", - "href": "FAQS.html", - "title": "FAQs", - "section": "", - "text": "FAQs\n\nCan you train StableLM with this? Yes, but only with a single GPU atm. Multi GPU support is coming soon! Just waiting on this PR\nWill this work with Deepspeed? That’s still a WIP, but setting export ACCELERATE_USE_DEEPSPEED=true should work in some cases\nError invalid argument at line 359 in file /workspace/bitsandbytes/csrc/pythonInterface.c\n/arrow/cpp/src/arrow/filesystem/s3fs.cc:2598: arrow::fs::FinalizeS3 was not called even though S3 was initialized.\nThis could lead to a segmentation fault at exit. Try reinstalling bitsandbytes and transformers from source." 
- }, - { - "objectID": "src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html", - "href": "src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html", - "title": "Axolotl", - "section": "", - "text": "Acknowledgements\nPortions of this Cut Cross Entropy Software may utilize the following copyrighted\nmaterial, the use of which is hereby acknowledged.\n\nPyTorch\nFrom PyTorch:\n\nCopyright (c) 2016- Facebook, Inc (Adam Paszke)\nCopyright (c) 2014- Facebook, Inc (Soumith Chintala)\nCopyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\nCopyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\nCopyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\nCopyright (c) 2011-2013 NYU (Clement Farabet)\nCopyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\nCopyright (c) 2006 Idiap Research Institute (Samy Bengio)\nCopyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n\nFrom Caffe2:\n\nCopyright (c) 2016-present, Facebook Inc. All rights reserved.\n\nAll contributions by Facebook:\nCopyright (c) 2016 Facebook Inc.\n\nAll contributions by Google:\nCopyright (c) 2015 Google Inc.\nAll rights reserved.\n\nAll contributions by Yangqing Jia:\nCopyright (c) 2015 Yangqing Jia\nAll rights reserved.\n\nAll contributions by Kakao Brain:\nCopyright 2019-2020 Kakao Brain\n\nAll contributions by Cruise LLC:\nCopyright (c) 2022 Cruise LLC.\nAll rights reserved.\n\nAll contributions by Arm:\nCopyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates\n\nAll contributions from Caffe:\nCopyright(c) 2013, 2014, 2015, the respective contributors\nAll rights reserved.\n\nAll other contributions:\nCopyright(c) 2015, 2016 the respective contributors\nAll rights reserved.\n\nCaffe2 uses a copyright model similar to Caffe: each contributor holds\ncopyright over their contributions to Caffe2. The project versioning records\nall such contribution and copyright details. If a contributor wants to further\nmark their specific copyright on a particular contribution, they should\nindicate their copyright solely in the commit message of the change when it is\ncommitted.\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\n3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America\nand IDIAP Research Institute nor the names of its contributors may be\nused to endorse or promote products derived from this software without\nspecific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\nTriton\n/*\n* Copyright 2018-2020 Philippe Tillet\n* Copyright 2020-2022 OpenAI\n*\n* Permission is hereby granted, free of charge, to any person obtaining\n* a copy of this software and associated documentation files\n* (the \"Software\"), to deal in the Software without restriction,\n* including without limitation the rights to use, copy, modify, merge,\n* publish, distribute, sublicense, and/or sell copies of the Software,\n* and to permit persons to whom the Software is furnished to do so,\n* subject to the following conditions:\n*\n* The above copyright notice and this permission notice shall be\n* included in all copies or substantial portions of the Software.\n*\n* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n*/\nTransformers\nCopyright 2018- The Hugging Face team. All rights reserved.\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License." 
- }, - { - "objectID": "index.html", - "href": "index.html", - "title": "Axolotl", - "section": "", - "text": "Axolotl is a tool designed to streamline post-training for various AI models.\nPost-training refers to any modifications or additional training performed on\npre-trained models - including full model fine-tuning, parameter-efficient tuning (like\nLoRA and QLoRA), supervised fine-tuning (SFT), instruction tuning, and alignment\ntechniques. With support for multiple model architectures and training configurations,\nAxolotl makes it easy to get started with these techniques.\nAxolotl is designed to work with YAML config files that contain everything you need to\npreprocess a dataset, train or fine-tune a model, run model inference or evaluation,\nand much more.\nFeatures:", - "crumbs": [ - "Home" - ] - }, - { - "objectID": "index.html#quick-start", - "href": "index.html#quick-start", - "title": "Axolotl", - "section": "🚀 Quick Start", - "text": "🚀 Quick Start\nRequirements:\n\nNVIDIA GPU (Ampere or newer for bf16 and Flash Attention) or AMD GPU\nPython 3.11\nPyTorch ≥2.4.1\n\n\nInstallation\npip3 install -U packaging==23.2 setuptools==75.8.0 wheel ninja\npip3 install --no-build-isolation axolotl[flash-attn,deepspeed]\n\n# Download example axolotl configs, deepspeed configs\naxolotl fetch examples\naxolotl fetch deepspeed_configs # OPTIONAL\nOther installation approaches are described here.\n\n\nYour First Fine-tune\n# Fetch axolotl examples\naxolotl fetch examples\n\n# Or, specify a custom path\naxolotl fetch examples --dest path/to/folder\n\n# Train a model using LoRA\naxolotl train examples/llama-3/lora-1b.yml\nThat’s it! Check out our Getting Started Guide for a more detailed walkthrough.", - "crumbs": [ - "Home" - ] - }, - { - "objectID": "index.html#key-features", - "href": "index.html#key-features", - "title": "Axolotl", - "section": "✨ Key Features", - "text": "✨ Key Features\n\nMultiple Model Support: Train various models like LLaMA, Mistral, Mixtral, Pythia, and more\nTraining Methods: Full fine-tuning, LoRA, QLoRA, and more\nEasy Configuration: Simple YAML files to control your training setup\nPerformance Optimizations: Flash Attention, xformers, multi-GPU training\nFlexible Dataset Handling: Use various formats and custom datasets\nCloud Ready: Run on cloud platforms or local hardware", - "crumbs": [ - "Home" - ] - }, - { - "objectID": "index.html#documentation", - "href": "index.html#documentation", - "title": "Axolotl", - "section": "📚 Documentation", - "text": "📚 Documentation\n\nInstallation Options - Detailed setup instructions for different environments\nConfiguration Guide - Full configuration options and examples\nDataset Guide - Supported formats and how to use them\nMulti-GPU Training\nMulti-Node Training\nMultipacking\nAPI Reference - Auto-generated code documentation\nFAQ - Frequently asked questions", - "crumbs": [ - "Home" - ] - }, - { - "objectID": "index.html#getting-help", - "href": "index.html#getting-help", - "title": "Axolotl", - "section": "🤝 Getting Help", - "text": "🤝 Getting Help\n\nJoin our Discord community for support\nCheck out our Examples directory\nRead our Debugging Guide\nNeed dedicated support? Please contact ✉️wing@axolotl.ai for options", - "crumbs": [ - "Home" - ] - }, - { - "objectID": "index.html#contributing", - "href": "index.html#contributing", - "title": "Axolotl", - "section": "🌟 Contributing", - "text": "🌟 Contributing\nContributions are welcome! 
Please see our Contributing Guide for details.", - "crumbs": [ - "Home" - ] - }, - { - "objectID": "index.html#supported-models", - "href": "index.html#supported-models", - "title": "Axolotl", - "section": "Supported Models", - "text": "Supported Models\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nfp16/fp32\nlora\nqlora\ngptq\ngptq w/flash attn\nflash attn\nxformers attn\n\n\n\n\nllama\n✅\n✅\n✅\n✅\n✅\n✅\n✅\n\n\nMistral\n✅\n✅\n✅\n✅\n✅\n✅\n✅\n\n\nMixtral-MoE\n✅\n✅\n✅\n❓\n❓\n❓\n❓\n\n\nMixtral8X22\n✅\n✅\n✅\n❓\n❓\n❓\n❓\n\n\nPythia\n✅\n✅\n✅\n❌\n❌\n❌\n❓\n\n\ncerebras\n✅\n✅\n✅\n❌\n❌\n❌\n❓\n\n\nbtlm\n✅\n✅\n✅\n❌\n❌\n❌\n❓\n\n\nmpt\n✅\n❌\n❓\n❌\n❌\n❌\n❓\n\n\nfalcon\n✅\n✅\n✅\n❌\n❌\n❌\n❓\n\n\ngpt-j\n✅\n✅\n✅\n❌\n❌\n❓\n❓\n\n\nXGen\n✅\n❓\n✅\n❓\n❓\n❓\n✅\n\n\nphi\n✅\n✅\n✅\n❓\n❓\n❓\n❓\n\n\nRWKV\n✅\n❓\n❓\n❓\n❓\n❓\n❓\n\n\nQwen\n✅\n✅\n✅\n❓\n❓\n❓\n❓\n\n\nGemma\n✅\n✅\n✅\n❓\n❓\n✅\n❓\n\n\nJamba\n✅\n✅\n✅\n❓\n❓\n✅\n❓\n\n\n\n✅: supported\n❌: not supported\n❓: untested", - "crumbs": [ - "Home" - ] - }, - { - "objectID": "index.html#sponsors", - "href": "index.html#sponsors", - "title": "Axolotl", - "section": "❤️ Sponsors", - "text": "❤️ Sponsors\nThank you to our sponsors who help make Axolotl possible:\n\nModal - Modal lets you run\njobs in the cloud, by just writing a few lines of Python. Customers use Modal to deploy Gen AI models at large scale,\nfine-tune large language models, run protein folding simulations, and much more.\n\nInterested in sponsoring? Contact us at wing@axolotl.ai", - "crumbs": [ - "Home" - ] - }, - { - "objectID": "index.html#license", - "href": "index.html#license", - "title": "Axolotl", - "section": "📜 License", - "text": "📜 License\nThis project is licensed under the Apache 2.0 License - see the LICENSE file for details.", - "crumbs": [ - "Home" - ] - }, - { - "objectID": "src/axolotl/integrations/LICENSE.html", - "href": "src/axolotl/integrations/LICENSE.html", - "title": "Axolotl", - "section": "", - "text": "AXOLOTL COMMUNITY LICENSE AGREEMENT\nThis Axolotl Community License Agreement (“Agreement”) is entered into by and between Axolotl AI Corp. (“Axolotl”) and\nany individual or entity (“Licensee”) who wishes to use the Software (as defined below) in accordance with the terms\nand conditions set forth in this Agreement.\n\nDefinitions\n1.1 “Licensee” refers to any individual or entity who has obtained a copy of the Software under this Agreement.\n1.2 “Plugin Integration” means independent integration software modules which may or may not be offered by Axolotl,\nwhich may be licensed separately by their respective authors and/or licensors.\n1.3 “Software” refers to the specific sub-directory of the Axolotl, Inc. 
software located at\nhttps://github.com/axolotl-ai-cloud/axolotl/tree/main/src/axolotl/integrations and its subdirectories which\npermits Plugin Integrations to integrate with the Axolotl service.\nGrant of License\n2.1 Axolotl hereby grants Licensee a worldwide, non-exclusive, royalty-free, license to use, copy, modify, merge,\npublish, distribute, sublicense, and/or otherwise exploit the Software, subject to the following conditions:\n- Licensee must comply with all the terms and conditions of this Agreement.\n- Licensee must include the original copyright notice and disclaimer of warranty in all copies or substantial\nportions of the Software.\n2.2 Licensee may use the Software for any lawful purpose, except as restricted in Section 3.\nRestrictions\n3.1 Licensee shall not use the Software for any activity that constitutes a commercial activity of offering for\nfree or for sale any services, platform, or equivalent to third parties for the purposes of allowing such\nthird parties to fine-tune artificial intelligence models.\n3.2 Licensee shall not:\n- Use the Software for any illegal or unauthorized purpose.\n- Reverse engineer, decompile, or disassemble the Software.\n- Remove or modify any copyright, trademark, or other proprietary notices contained in the Software.\n- Use the Software in a way that could damage, disable, overburden, or impair the functionality of the\nSoftware or interfere with any third-party use of the Software.\n3.3 Axolotl reserves the right to restrict certain Plugin Integrations for use with the Software. To the extent Licensee integrates a permitted, applicable Plugin Integration with the Software, Licensee shall comply with any additional terms and conditions imposed by the licensors of such Plugin Integration for use of such Plugin Integrations. Licensee shall contact Axolotl if it has questions about whether its use of the Software falls beyond the scope of this Agreement.\nIntellectual Property Rights\n4.1 Axolotl and its contributors retain all intellectual property rights in and to the Software. Licensee\nacknowledges that this Agreement does not transfer any ownership rights or intellectual property rights to\nLicensee.\nDisclaimer of Warranty\n5.1 THE SOFTWARE IS PROVIDED “AS IS,” WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\nTO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. IN NO EVENT SHALL\nTHE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF\nCONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\nTermination\n6.1 Axolotl may terminate this Agreement at any time if Licensee fails to comply with any of the terms and\nconditions set forth herein. Upon termination, Licensee shall cease all use of the Software and destroy any\ncopies in its possession.\nGoverning Law\n7.1 This Agreement shall be governed by and construed in accordance with the laws of the State of California,\nwithout regards to conflicts of laws provisions thereof.\nEntire Agreement\n8.1 This Agreement constitutes the entire agreement between Axolotl and Licensee with respect to the subject matter\nhereof and supersedes all prior or contemporaneous understandings or agreements between the parties concerning\nthe Software, whether written or oral. 
Axolotl may update the terms of this Agreement from time to time, and\nLicensee’s continued use of the Software after any such updates shall constitute acceptance of updated terms\non a go-forward basis. Axolotl will use commercially reasonable efforts to provide Licensee notice of any\nmaterial updates. By using the Software, Licensee acknowledges that it has read, understood, and agrees to be\nbound by the terms and conditions of this Agreement.\n\nThis Agreement was last updated on August 23, 2024." - }, - { - "objectID": "TODO.html", - "href": "TODO.html", - "title": "todo list", - "section": "", - "text": "[] Validation of parameters for combinations that won’t work\n\n\n\n\nFSDP offload and gradient_checkpointing - https://github.com/pytorch/pytorch/issues/82203\nadamw_bnb_8bit doesn’t play well with FSDP offload" - }, - { - "objectID": "TODO.html#things-that-are-known-not-to-work", - "href": "TODO.html#things-that-are-known-not-to-work", - "title": "todo list", - "section": "", - "text": "FSDP offload and gradient_checkpointing - https://github.com/pytorch/pytorch/issues/82203\nadamw_bnb_8bit doesn’t play well with FSDP offload" - }, - { - "objectID": "docs/lr_groups.html", - "href": "docs/lr_groups.html", - "title": "Learning Rate Groups", - "section": "", - "text": "Inspired by LoRA+, Axolotl allows practitioners to specify separate learning rates for each module or groups of\nmodules in a model.", - "crumbs": [ - "How To Guides", - "Learning Rate Groups" - ] - }, - { - "objectID": "docs/lr_groups.html#background", - "href": "docs/lr_groups.html#background", - "title": "Learning Rate Groups", - "section": "", - "text": "Inspired by LoRA+, Axolotl allows practitioners to specify separate learning rates for each module or groups of\nmodules in a model.", - "crumbs": [ - "How To Guides", - "Learning Rate Groups" - ] - }, - { - "objectID": "docs/lr_groups.html#example", - "href": "docs/lr_groups.html#example", - "title": "Learning Rate Groups", - "section": "Example", - "text": "Example\nlr_groups:\n - name: o_proj\n modules:\n - self_attn.o_proj.weight\n lr: 1e-6\n - name: q_proj\n modules:\n - model.layers.2.self_attn.q_proj.weight\n lr: 1e-5\n\nlearning_rate: 2e-5\nIn this example, we have a default learning rate of 2e-5 across the entire model, but we have a separate learning rate\nof 1e-6 for all the self attention o_proj modules across all layers, and a learning are of 1e-5 to the 3rd layer’s\nself attention q_proj module.", - "crumbs": [ - "How To Guides", - "Learning Rate Groups" - ] - }, - { - "objectID": "docs/debugging.html", - "href": "docs/debugging.html", - "title": "Debugging", - "section": "", - "text": "This document provides some tips and tricks for debugging Axolotl. It also provides an example configuration for debugging with VSCode. 
A good debugging setup is essential to understanding how Axolotl code works behind the scenes.", - "crumbs": [ - "Troubleshooting", - "Debugging" - ] - }, - { - "objectID": "docs/debugging.html#table-of-contents", - "href": "docs/debugging.html#table-of-contents", - "title": "Debugging", - "section": "Table of Contents", - "text": "Table of Contents\n\nGeneral Tips\nDebugging with VSCode\n\nBackground\nConfiguration\nCustomizing your debugger\nVideo Tutorial\n\nDebugging With Docker\n\nSetup\nAttach To Container\nVideo - Attaching To Docker On Remote Host", - "crumbs": [ - "Troubleshooting", - "Debugging" - ] - }, - { - "objectID": "docs/debugging.html#general-tips", - "href": "docs/debugging.html#general-tips", - "title": "Debugging", - "section": "General Tips", - "text": "General Tips\nWhile debugging it’s helpful to simplify your test scenario as much as possible. Here are some tips for doing so:\n\n[!Important]\nAll of these tips are incorporated into the example configuration for debugging with VSCode below.\n\n\nMake sure you are using the latest version of axolotl: This project changes often and bugs get fixed fast. Check your git branch and make sure you have pulled the latest changes from main.\nEliminate concurrency: Restrict the number of processes to 1 for both training and data preprocessing:\n\nSet CUDA_VISIBLE_DEVICES to a single GPU, ex: export CUDA_VISIBLE_DEVICES=0.\nSet dataset_processes: 1 in your axolotl config or run the training command with --dataset_processes=1.\n\nUse a small dataset: Construct or use a small dataset from HF Hub. When using a small dataset, you will often have to make sure sample_packing: False and eval_sample_packing: False to avoid errors. If you are in a pinch and don’t have time to construct a small dataset but want to use from the HF Hub, you can shard the data (this will still tokenize the entire dataset, but will only use a fraction of the data for training. For example, to shard the dataset into 20 pieces, add the following to your axolotl config):\ndatasets:\n ...\n shards: 20\nUse a small model: A good example of a small model is TinyLlama/TinyLlama-1.1B-Chat-v1.0.\nMinimize iteration time: Make sure the training loop finishes as fast as possible, with these settings.\n\nmicro_batch_size: 1\nmax_steps: 1\nval_set_size: 0\n\nClear Caches: Axolotl caches certain steps and so does the underlying HuggingFace trainer. You may want to clear some of these caches when debugging.\n\nData preprocessing: When debugging data preprocessing, which includes prompt template formation, you may want to delete the directory set in dataset_prepared_path: in your axolotl config. If you didn’t set this value, the default is last_run_prepared.\nHF Hub: If you are debugging data preprocessing, you should clear the relevant HF cache HuggingFace cache, by deleting the appropriate ~/.cache/huggingface/datasets/... folder(s).\nThe recommended approach is to redirect all outputs and caches to a temporary folder and delete selected subfolders before each run. This is demonstrated in the example configuration below.", - "crumbs": [ - "Troubleshooting", - "Debugging" - ] - }, - { - "objectID": "docs/debugging.html#debugging-with-vscode", - "href": "docs/debugging.html#debugging-with-vscode", - "title": "Debugging", - "section": "Debugging with VSCode", - "text": "Debugging with VSCode\n\nBackground\nThe below example shows how to configure VSCode to debug data preprocessing of the chat_template format. 
This is the format used when you have the following in your axolotl config:\ndatasets:\n - path: <path to your chat_template formatted dataset> # example on HF Hub: fozziethebeat/alpaca_messages_2k_test\n type: chat_template\n\n[!Important]\nIf you are already familiar with advanced VSCode debugging, you can skip the below explanation and look at the files .vscode/launch.json and .vscode/tasks.json for an example configuration.\n\n\n[!Tip]\nIf you prefer to watch a video, rather than read, you can skip to the video tutorial below (but doing both is recommended).\n\n\n\nSetup\nMake sure you have an editable install of Axolotl, which ensures that changes you make to the code are reflected at runtime. Run the following commands from the root of this project:\npip3 install packaging\npip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'\n\nRemote Hosts\nIf you developing on a remote host, you can easily use VSCode to debug remotely. To do so, you will need to follow this remote - SSH guide. You can also see the video below on Docker and Remote SSH debugging.\n\n\n\nConfiguration\nThe easiest way to get started is to modify the .vscode/launch.json file in this project. This is just an example configuration, so you may need to modify or copy it to suit your needs.\nFor example, to mimic the command cd devtools && CUDA_VISIBLE_DEVICES=0 accelerate launch -m axolotl.cli.train dev_chat_template.yml, you would use the below configuration1. Note that we add additional flags that override the axolotl config and incorporate the tips above (see the comments). We also set the working directory to devtools and set the env variable HF_HOME to a temporary folder that is later partially deleted. This is because we want to delete the HF dataset cache before each run in order to ensure that the data preprocessing code is run from scratch.\n// .vscode/launch.json\n{\n \"version\": \"0.2.0\",\n \"configurations\": [\n {\n \"name\": \"Debug axolotl prompt - chat_template\",\n \"type\": \"python\",\n \"module\": \"accelerate.commands.launch\",\n \"request\": \"launch\",\n \"args\": [\n \"-m\", \"axolotl.cli.train\", \"dev_chat_template.yml\",\n // The flags below simplify debugging by overriding the axolotl config\n // with the debugging tips above. 
Modify as needed.\n \"--dataset_processes=1\", // limits data preprocessing to one process\n \"--max_steps=1\", // limits training to just one step\n \"--batch_size=1\", // minimizes batch size\n \"--micro_batch_size=1\", // minimizes batch size\n \"--val_set_size=0\", // disables validation\n \"--sample_packing=False\", // disables sample packing which is necessary for small datasets\n \"--eval_sample_packing=False\",// disables sample packing on eval set\n \"--dataset_prepared_path=temp_debug/axolotl_outputs/data\", // send data outputs to a temp folder\n \"--output_dir=temp_debug/axolotl_outputs/model\" // send model outputs to a temp folder\n ],\n \"console\": \"integratedTerminal\", // show output in the integrated terminal\n \"cwd\": \"${workspaceFolder}/devtools\", // set working directory to devtools from the root of the project\n \"justMyCode\": true, // step through only axolotl code\n \"env\": {\"CUDA_VISIBLE_DEVICES\": \"0\", // Since we aren't doing distributed training, we need to limit to one GPU\n \"HF_HOME\": \"${workspaceFolder}/devtools/temp_debug/.hf-cache\"}, // send HF cache to a temp folder\n \"preLaunchTask\": \"cleanup-for-dataprep\", // delete temp folders (see below)\n }\n ]\n}\nAdditional notes about this configuration:\n\nThe argument justMyCode is set to true such that you step through only the axolotl code. If you want to step into dependencies, set this to false.\nThe preLaunchTask: cleanup-for-dataprep is defined in .vscode/tasks.json and is used to delete the following folders before debugging, which is essential to ensure that the data pre-processing code is run from scratch:\n\n./devtools/temp_debug/axolotl_outputs\n./devtools/temp_debug/.hf-cache/datasets\n\n\n\n[!Tip]\nYou may not want to delete these folders. For example, if you are debugging model training instead of data pre-processing, you may NOT want to delete the cache or output folders. You may also need to add additional tasks to the tasks.json file depending on your use case.\n\nBelow is the ./vscode/tasks.json file that defines the cleanup-for-dataprep task. This task is run before each debugging session when you use the above configuration. Note how there are two tasks that delete the two folders mentioned above. The third task cleanup-for-dataprep is a composite task that combines the two tasks. A composite task is necessary because VSCode does not allow you to specify multiple tasks in the preLaunchTask argument of the launch.json file.\n// .vscode/tasks.json\n// this file is used by launch.json\n{\n \"version\": \"2.0.0\",\n \"tasks\": [\n // this task changes into the devtools directory and deletes the temp_debug/axolotl_outputs folder\n {\n \"label\": \"delete-outputs\",\n \"type\": \"shell\",\n \"command\": \"rm -rf temp_debug/axolotl_outputs\",\n \"options\":{ \"cwd\": \"${workspaceFolder}/devtools\"},\n \"problemMatcher\": []\n },\n // this task changes into the devtools directory and deletes the `temp_debug/.hf-cache/datasets` folder\n {\n \"label\": \"delete-temp-hf-dataset-cache\",\n \"type\": \"shell\",\n \"command\": \"rm -rf temp_debug/.hf-cache/datasets\",\n \"options\":{ \"cwd\": \"${workspaceFolder}/devtools\"},\n \"problemMatcher\": []\n },\n // this task combines the two tasks above\n {\n \"label\": \"cleanup-for-dataprep\",\n \"dependsOn\": [\"delete-outputs\", \"delete-temp-hf-dataset-cache\"],\n }\n ]\n}\n\n\nCustomizing your debugger\nYour debugging use case may differ from the example above. 
The easiest thing to do is to put your own axolotl config in the devtools folder and modify the launch.json file to use your config. You may also want to modify the preLaunchTask to delete different folders or not delete anything at all.\n\n\nVideo Tutorial\nThe following video tutorial walks through the above configuration and demonstrates how to debug with VSCode, (click the image below to watch):\n\n\n\nHamel Husain’s tutorial: Debugging Axolotl w/VSCode", - "crumbs": [ - "Troubleshooting", - "Debugging" - ] - }, - { - "objectID": "docs/debugging.html#debugging-with-docker", - "href": "docs/debugging.html#debugging-with-docker", - "title": "Debugging", - "section": "Debugging With Docker", - "text": "Debugging With Docker\nUsing official Axolotl Docker images is a great way to debug your code, and is a very popular way to use Axolotl. Attaching VSCode to Docker takes a few more steps.\n\nSetup\nOn the host that is running axolotl (ex: if you are using a remote host), clone the axolotl repo and change your current directory to the root:\ngit clone https://github.com/axolotl-ai-cloud/axolotl\ncd axolotl\n\n[!Tip]\nIf you already have axolotl cloned on your host, make sure you have the latest changes and change into the root of the project.\n\nNext, run the desired docker image and mount the current directory. Below is a docker command you can run to do this:2\ndocker run --privileged --gpus '\"all\"' --shm-size 10g --rm -it --name axolotl --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 --mount type=bind,src=\"${PWD}\",target=/workspace/axolotl -v ${HOME}/.cache/huggingface:/root/.cache/huggingface axolotlai/axolotl:main-py3.10-cu118-2.0.1\n\n[!Tip]\nTo understand which containers are available, see the Docker section of the README and the DockerHub repo. For details of how the Docker containers are built, see axolotl’s Docker CI builds.\n\nYou will now be in the container. Next, perform an editable install of Axolotl:\npip3 install packaging\npip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'\n\n\nAttach To Container\nNext, if you are using a remote host, Remote into this host with VSCode. If you are using a local host, you can skip this step.\nNext, select Dev Containers: Attach to Running Container... using the command palette (CMD + SHIFT + P) in VSCode. You will be prompted to select a container to attach to. Select the container you just created. You will now be in the container with a working directory that is at the root of the project. Any changes you make to the code will be reflected both in the container and on the host.\nNow you are ready to debug as described above (see Debugging with VSCode).\n\n\nVideo - Attaching To Docker On Remote Host\nHere is a short video that demonstrates how to attach to a Docker container on a remote host:\n\n\n\nHamel Husain’s tutorial: Debugging Axolotl Part 2: Attaching to Docker on a Remote Host", - "crumbs": [ - "Troubleshooting", - "Debugging" - ] - }, - { - "objectID": "docs/debugging.html#footnotes", - "href": "docs/debugging.html#footnotes", - "title": "Debugging", - "section": "Footnotes", - "text": "Footnotes\n\n\nThe config actually mimics the command CUDA_VISIBLE_DEVICES=0 python -m accelerate.commands.launch -m axolotl.cli.train devtools/chat_template.yml, but this is the same thing.↩︎\nMany of the below flags are recommended best practices by Nvidia when using nvidia-container-toolkit. 
You can read more about these flags here.↩︎", - "crumbs": [ - "Troubleshooting", - "Debugging" - ] - }, - { - "objectID": "docs/faq.html", - "href": "docs/faq.html", - "title": "FAQ", - "section": "", - "text": "General\nQ: The trainer stopped and hasn’t progressed in several minutes.\n\nA: Usually an issue with the GPUs communicating with each other. See the NCCL doc\n\nQ: Exitcode -9\n\nA: This usually happens when you run out of system RAM.\n\nQ: Exitcode -7 while using deepspeed\n\nA: Try upgrading deepspeed w: pip install -U deepspeed\n\nQ: AttributeError: ‘DummyOptim’ object has no attribute ‘step’\nQ: ModuleNotFoundError: No module named ‘mpi4py’ using single GPU with deepspeed\n\nA: You may be using deepspeed with single gpu. Please remove the deepspeed: section in the yaml file or --deepspeed CLI flag.\n\nQ: The codes is stuck on saving preprocessed datasets.\n\nA: This is usually an issue with the GPU. This can be resolved through setting the os environment variable CUDA_VISIBLE_DEVICES=0. If you are on runpod, this is usually a pod issue. Starting a new pod should take care of it.\n\nQ: Received mismatch error on merge adapters / loading adapters between torch.Size of checkpoint and model.\n\nA: This is likely due to vocab size mismatch. By default, Axolotl expands the model’s embeddings if the tokenizer has more tokens than the model. Please use the axolotl merge-lora command to merge the adapters instead of using your own scripts.\n\n\nOn the other hand, if the model has more tokens than the tokenizer, Axolotl does not shrink the model’s embeddings unless shrink_embeddings: true is set in the config.\n\nQ: How to call Axolotl via custom python scripts?\n\nA: Since Axolotl is just Python, please see src/axolotl/cli/main.py on how each command is called.\n\nQ: How to know the value to use for fsdp_transformer_layer_cls_to_wrap?\n\nA: This is the class name of the transformer layer to wrap with FSDP. For example, for LlamaForCausalLM, the value is LlamaDecoderLayer. To find this for a specific model, check the model’s PreTrainedModel definition and look for _no_split_modules variable in the modeling_<model_name>.py file within transformers library.\n\nQ: ValueError: Asking to pad but the tokenizer does not have a padding token. Please select a token to use as pad_token\n\nA: This is because the tokenizer does not have a padding token. Please add a padding token to the tokenizer via:\n\n\nspecial_tokens:\n # str. If you're not sure, set to same as `eos_token`.\n pad_token: \"...\"\n\n\n\nChat templates\nQ: jinja2.exceptions.UndefinedError: 'dict object' has no attribute 'content' / 'role' / ____\n\nA: This means that the property mapping for the stated attribute does not exist when building chat_template prompt. For example, if no attribute 'content', please check you have added the correct mapping for content under message_property_mappings.\n\nQ: Empty template generated for turn ___\n\nA: The content is empty for that turn.\n\nQ: Could not find content start/end boundary for turn __\n\nA: The specific turn’s start/end could not be detected. Please ensure you have set the eos_token following your chat_template. Otherwise, this could be a chat_template which doesn’t use proper boundaries for each turn (like system). On the rare occurrence, make sure your content is not [[dummy_message]]. Please let us know about this.\n\nQ: Content end boundary is before start boundary for turn ___\n\nA: This is an edge case which should not occur. 
Please create an Issue if this happens.\n\nQ: Content end boundary is the same as start boundary for turn ___. This is likely an empty turn.\n\nA: This is likely an empty turn.\n\nQ: The EOS/EOT token is incorrectly being masked or not being masked.\n\nA: This is because of the mismatch between tokenizer.eos_token and EOS/EOT token in template. Please make sure to set eos_token under special_tokens to the same EOS/EOT token as in template.\n\nQ: “chat_template choice is tokenizer_default but tokenizer’s chat_template is null. Please add a chat_template in tokenizer config”\n\nA: This is because the tokenizer does not have a chat template. Please add a chat template in the tokenizer config. See chat_template for more details.", - "crumbs": [ - "Troubleshooting", - "FAQ" - ] - }, - { - "objectID": "docs/batch_vs_grad.html", - "href": "docs/batch_vs_grad.html", - "title": "Batch size vs Gradient accumulation", - "section": "", - "text": "Gradient accumulation means accumulating gradients over several mini-batches and updating the model weights afterward. When the samples in each batch are diverse, this technique doesn’t significantly impact learning.\nThis method allows for effective training with larger effective batch sizes without needing proportionally larger memory. Here’s why:\n\nMemory Consumption with Batch Size: The primary reason increasing the batch size impacts memory is due to the storage requirements for intermediate activations. When you forward propagate a batch through a network, you have to store the activations at each layer for each sample in the batch, because these activations are used during backpropagation to compute gradients. Therefore, larger batches mean more activations, leading to greater GPU memory consumption.\nGradient Accumulation: With gradient accumulation, you’re effectively simulating a larger batch size by accumulating gradients over several smaller batches (or micro-batches). However, at any given time, you’re only forward and backward propagating a micro-batch. This means you only store activations for the micro-batch, not the full accumulated batch. 
As a result, you can simulate the effect of a larger batch size without the memory cost of storing activations for a large batch.\n\nExample 1:\nMicro batch size: 3\nGradient accumulation steps: 2\nNumber of GPUs: 3\nTotal batch size = 3 * 2 * 3 = 18\n| GPU 1 | GPU 2 | GPU 3 |\n|----------------|----------------|----------------|\n| S1, S2, S3 | S4, S5, S6 | S7, S8, S9 |\n| e1, e2, e3 | e4, e5, e6 | e7, e8, e9 |\n|----------------|----------------|----------------|\n| → (accumulate) | → (accumulate) | → (accumulate) |\n|----------------|----------------|----------------|\n| S10, S11, S12 | S13, S14, S15 | S16, S17, S18 |\n| e10, e11, e12 | e13, e14, e15 | e16, e17, e18 |\n|----------------|----------------|----------------|\n| → (apply) | → (apply) | → (apply) |\n\nAccumulated gradient for the weight w1 after the second iteration (considering all GPUs):\nTotal gradient for w1 = e1 + e2 + e3 + e4 + e5 + e6 + e7 + e8 + e9 + e10 + e11 + e12 + e13 + e14 + e15 + e16 + e17 + e18\n\nWeight update for w1:\nw1_new = w1_old - learning rate x (Total gradient for w1 / 18)\nExample 2:\nMicro batch size: 2\nGradient accumulation steps: 1\nNumber of GPUs: 3\nTotal batch size = 2 * 1 * 3 = 6\n| GPU 1 | GPU 2 | GPU 3 |\n|-----------|-----------|-----------|\n| S1, S2 | S3, S4 | S5, S6 |\n| e1, e2 | e3, e4 | e5, e6 |\n|-----------|-----------|-----------|\n| → (apply) | → (apply) | → (apply) |\n\nAccumulated gradient for the weight w1 (considering all GPUs):\nTotal gradient for w1 = e1 + e2 + e3 + e4 + e5 + e6\n\nWeight update for w1:\nw1_new = w1_old - learning rate × (Total gradient for w1 / 6)", + "text": "Because Flash Attention simply drops the attention mask, we do not need to\nconstruct a 4d attention mask. We only need to concatenate the sequences into\na single batch and let flash attention know where each new sequence begins.\n4k context, bsz =4,\neach character represents 256 tokens\nX represents a padding token\n 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5\n[[ A A A A A A A A A A A ]\n B B B B B B ]\n C C C C C C C ]\n D D D D ]]\n\n[[ E E E E E E E E ]\n [ F F F F ]\n [ G G G ]\n [ H H H H ]]\n\n[[ I I I ]\n [ J J J ]\n [ K K K K K]\n [ L L L ]]\nafter padding to longest input in each step\n 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5\n[[ A A A A A A A A A A A ]\n B B B B B B X X X X X X ]\n C C C C C C C X X X X ]\n D D D D X X X X X X X ]]\n\n[[ E E E E E E E E ]\n [ F F F F X X X X ]\n [ G G G X X X X X ]\n [ H H H H X X X X ]]\n\n[[ I I I X X ]\n [ J J J X X ]\n [ K K K K K ]\n [ L L L X X ]]\nw packing ( note it’s the same effective number of tokens per step, but a true bsz of 1)\n 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5\n[[ A A A A A A A A A A A B B B B B\n B C C C C C C C D D D D E E E E\n E E E E F F F F F G G G H H H H\n I I I J J J J K K K K K L L L X ]]\ncu_seqlens:\n[[ 0, 11, 17, 24, 28, 36, 41 44, 48, 51, 55, 60, 64]]", "crumbs": [ "Core Concepts", - "Batch size vs Gradient accumulation" + "Multipack (Sample Packing)" + ] + }, + { + "objectID": "docs/multipack.html#visualization-of-multipack-with-flash-attention", + "href": "docs/multipack.html#visualization-of-multipack-with-flash-attention", + "title": "Multipack (Sample Packing)", + "section": "", + "text": "Because Flash Attention simply drops the attention mask, we do not need to\nconstruct a 4d attention mask. 
We only need to concatenate the sequences into\na single batch and let flash attention know where each new sequence begins.\n4k context, bsz =4,\neach character represents 256 tokens\nX represents a padding token\n 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5\n[[ A A A A A A A A A A A ]\n B B B B B B ]\n C C C C C C C ]\n D D D D ]]\n\n[[ E E E E E E E E ]\n [ F F F F ]\n [ G G G ]\n [ H H H H ]]\n\n[[ I I I ]\n [ J J J ]\n [ K K K K K]\n [ L L L ]]\nafter padding to longest input in each step\n 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5\n[[ A A A A A A A A A A A ]\n B B B B B B X X X X X X ]\n C C C C C C C X X X X ]\n D D D D X X X X X X X ]]\n\n[[ E E E E E E E E ]\n [ F F F F X X X X ]\n [ G G G X X X X X ]\n [ H H H H X X X X ]]\n\n[[ I I I X X ]\n [ J J J X X ]\n [ K K K K K ]\n [ L L L X X ]]\nw packing ( note it’s the same effective number of tokens per step, but a true bsz of 1)\n 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5\n[[ A A A A A A A A A A A B B B B B\n B C C C C C C C D D D D E E E E\n E E E E F F F F F G G G H H H H\n I I I J J J J K K K K K L L L X ]]\ncu_seqlens:\n[[ 0, 11, 17, 24, 28, 36, 41 44, 48, 51, 55, 60, 64]]", + "crumbs": [ + "Core Concepts", + "Multipack (Sample Packing)" + ] + }, + { + "objectID": "docs/multipack.html#multipack-without-flash-attention", + "href": "docs/multipack.html#multipack-without-flash-attention", + "title": "Multipack (Sample Packing)", + "section": "Multipack without Flash Attention", + "text": "Multipack without Flash Attention\nMultipack can still be achieved without Flash attention, but with lower packing\nefficiency as we are not able to join multiple batches into a single batch due to\ncontext length limits without flash attention. We can use either Pytorch’s Scaled\nDot Product Attention implementation or native Pytorch attention implementation\nalong with 4d attention masks\nto pack sequences together and avoid cross attention.", + "crumbs": [ + "Core Concepts", + "Multipack (Sample Packing)" + ] + }, + { + "objectID": "docs/inference.html", + "href": "docs/inference.html", + "title": "Inference and Merging", + "section": "", + "text": "This guide covers how to use your trained models for inference, including model loading, interactive testing, merging adapters, and common troubleshooting steps.", + "crumbs": [ + "Getting Started", + "Inference and Merging" + ] + }, + { + "objectID": "docs/inference.html#sec-quickstart", + "href": "docs/inference.html#sec-quickstart", + "title": "Inference and Merging", + "section": "1 Quick Start", + "text": "1 Quick Start\n\n\n\n\n\n\nTip\n\n\n\nUse the same config used for training on inference/merging.\n\n\n\n1.1 Basic Inference\n\nLoRA ModelsFull Fine-tuned Models\n\n\naxolotl inference your_config.yml --lora-model-dir=\"./lora-output-dir\"\n\n\naxolotl inference your_config.yml --base-model=\"./completed-model\"", + "crumbs": [ + "Getting Started", + "Inference and Merging" + ] + }, + { + "objectID": "docs/inference.html#sec-advanced", + "href": "docs/inference.html#sec-advanced", + "title": "Inference and Merging", + "section": "2 Advanced Usage", + "text": "2 Advanced Usage\n\n2.1 Gradio Interface\nLaunch an interactive web interface:\naxolotl inference your_config.yml --gradio\n\n\n2.2 File-based Prompts\nProcess prompts from a text file:\ncat /tmp/prompt.txt | axolotl inference your_config.yml \\\n --base-model=\"./completed-model\" --prompter=None\n\n\n2.3 Memory Optimization\nFor large models or limited memory:\naxolotl inference your_config.yml --load-in-8bit=True", + "crumbs": [ + "Getting Started", + "Inference and 
Merging" + ] + }, + { + "objectID": "docs/inference.html#sec-merging", + "href": "docs/inference.html#sec-merging", + "title": "Inference and Merging", + "section": "3 Merging LoRA Weights", + "text": "3 Merging LoRA Weights\nMerge LoRA adapters with the base model:\naxolotl merge-lora your_config.yml --lora-model-dir=\"./completed-model\"\n\n3.1 Memory Management for Merging\n\nConfiguration OptionsForce CPU Merging\n\n\ngpu_memory_limit: 20GiB # Adjust based on your GPU\nlora_on_cpu: true # Process on CPU if needed\n\n\nCUDA_VISIBLE_DEVICES=\"\" axolotl merge-lora ...", + "crumbs": [ + "Getting Started", + "Inference and Merging" + ] + }, + { + "objectID": "docs/inference.html#sec-tokenization", + "href": "docs/inference.html#sec-tokenization", + "title": "Inference and Merging", + "section": "4 Tokenization", + "text": "4 Tokenization\n\n4.1 Common Issues\n\n\n\n\n\n\nWarning\n\n\n\nTokenization mismatches between training and inference are a common source of problems.\n\n\nTo debug:\n\nCheck training tokenization:\n\naxolotl preprocess your_config.yml --debug\n\nVerify inference tokenization by decoding tokens before model input\nCompare token IDs between training and inference\n\n\n\n4.2 Special Tokens\nConfigure special tokens in your YAML:\nspecial_tokens:\n bos_token: \"<s>\"\n eos_token: \"</s>\"\n unk_token: \"<unk>\"\ntokens:\n - \"<|im_start|>\"\n - \"<|im_end|>\"", + "crumbs": [ + "Getting Started", + "Inference and Merging" + ] + }, + { + "objectID": "docs/inference.html#sec-troubleshooting", + "href": "docs/inference.html#sec-troubleshooting", + "title": "Inference and Merging", + "section": "5 Troubleshooting", + "text": "5 Troubleshooting\n\n5.1 Common Problems\n\nMemory IssuesToken IssuesPerformance Issues\n\n\n\nUse 8-bit loading\nReduce batch sizes\nTry CPU offloading\n\n\n\n\nVerify special tokens\nCheck tokenizer settings\nCompare training and inference preprocessing\n\n\n\n\nVerify model loading\nCheck prompt formatting\nEnsure temperature/sampling settings\n\n\n\n\nFor more details, see our debugging guide.", + "crumbs": [ + "Getting Started", + "Inference and Merging" ] }, { diff --git a/sitemap.xml b/sitemap.xml index 2b9857d65..c7811e7c5 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -2,678 +2,682 @@ https://axolotl-ai-cloud.github.io/axolotl/examples/colab-notebooks/colab-axolotl-example.html - 2025-04-07T16:39:31.284Z + 2025-04-07T16:41:27.525Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/stepwise_supervised.html - 2025-04-07T16:39:31.280Z + 2025-04-07T16:41:27.521Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/template_free.html - 2025-04-07T16:39:31.280Z + 2025-04-07T16:41:27.521Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/tokenized.html - 2025-04-07T16:39:31.280Z + 2025-04-07T16:41:27.521Z https://axolotl-ai-cloud.github.io/axolotl/docs/nccl.html - 2025-04-07T16:39:31.283Z + 2025-04-07T16:41:27.524Z https://axolotl-ai-cloud.github.io/axolotl/docs/amd_hpc.html - 2025-04-07T16:39:31.279Z + 2025-04-07T16:41:27.520Z https://axolotl-ai-cloud.github.io/axolotl/docs/config.html - 2025-04-07T16:39:31.279Z + 2025-04-07T16:41:27.520Z https://axolotl-ai-cloud.github.io/axolotl/docs/multi-gpu.html - 2025-04-07T16:39:31.283Z + 2025-04-07T16:41:27.524Z https://axolotl-ai-cloud.github.io/axolotl/docs/installation.html - 2025-04-07T16:39:31.283Z + 2025-04-07T16:41:27.524Z https://axolotl-ai-cloud.github.io/axolotl/docs/torchao.html - 2025-04-07T16:39:31.283Z + 2025-04-07T16:41:27.524Z 
https://axolotl-ai-cloud.github.io/axolotl/docs/reward_modelling.html - 2025-04-07T16:39:31.283Z + 2025-04-07T16:41:27.524Z https://axolotl-ai-cloud.github.io/axolotl/docs/input_output.html - 2025-04-07T16:39:31.283Z + 2025-04-07T16:41:27.524Z https://axolotl-ai-cloud.github.io/axolotl/docs/multimodal.html - 2025-04-07T16:39:31.283Z + 2025-04-07T16:41:27.524Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.callbacks.mlflow_.html - 2025-04-07T16:40:03.307Z + 2025-04-07T16:42:04.119Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.trainer_fsdp_optim.html - 2025-04-07T16:40:02.898Z + 2025-04-07T16:42:03.710Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.data.batch_dataset_fetcher.html - 2025-04-07T16:40:02.914Z + 2025-04-07T16:42:03.727Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.stepwise_supervised.html - 2025-04-07T16:40:02.607Z + 2025-04-07T16:42:03.418Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.mistral_attn_hijack_flash.html - 2025-04-07T16:40:02.847Z + 2025-04-07T16:42:03.659Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.dpo.user_defined.html - 2025-04-07T16:40:02.653Z + 2025-04-07T16:42:03.464Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/integrations.liger.args.html - 2025-04-07T16:40:03.221Z + 2025-04-07T16:42:04.034Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.training.html - 2025-04-07T16:40:03.084Z + 2025-04-07T16:42:03.897Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/datasets.html - 2025-04-07T16:40:02.104Z + 2025-04-07T16:42:02.857Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/kernels.geglu.html - 2025-04-07T16:40:02.787Z + 2025-04-07T16:42:03.600Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.llama_attn_hijack_flash.html - 2025-04-07T16:40:02.831Z + 2025-04-07T16:42:03.643Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.sweeps.html - 2025-04-07T16:40:02.441Z + 2025-04-07T16:42:03.251Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.freeze.html - 2025-04-07T16:40:02.986Z + 2025-04-07T16:42:03.799Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.multipack.html - 2025-04-07T16:40:02.848Z + 2025-04-07T16:42:03.660Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.main.html - 2025-04-07T16:40:02.336Z + 2025-04-07T16:42:03.106Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.trainers.trl.html - 2025-04-07T16:40:02.517Z + 2025-04-07T16:42:03.328Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.dpo.passthrough.html - 2025-04-07T16:40:02.654Z + 2025-04-07T16:42:03.466Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.chat.format.llama3x.html - 2025-04-07T16:40:02.290Z + 2025-04-07T16:42:03.061Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.datasets.transforms.chat_builder.html - 2025-04-07T16:40:02.305Z + 2025-04-07T16:42:03.075Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.kto.user_defined.html - 2025-04-07T16:40:02.671Z + 2025-04-07T16:42:03.483Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.collators.mamba.html - 2025-04-07T16:40:03.279Z + 2025-04-07T16:42:04.091Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/integrations.base.html - 2025-04-07T16:40:03.206Z + 2025-04-07T16:42:04.019Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.bench.html - 2025-04-07T16:40:02.978Z + 2025-04-07T16:42:03.791Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/kernels.swiglu.html 
- 2025-04-07T16:40:02.797Z + 2025-04-07T16:42:03.609Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.chat.format.shared.html - 2025-04-07T16:40:02.292Z + 2025-04-07T16:42:03.062Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/integrations.cut_cross_entropy.args.html - 2025-04-07T16:40:03.210Z + 2025-04-07T16:42:04.022Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.datasets.chat.html - 2025-04-07T16:40:02.297Z + 2025-04-07T16:42:03.067Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.callbacks.lisa.html - 2025-04-07T16:40:03.304Z + 2025-04-07T16:42:04.116Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/integrations.grokfast.optimizer.html - 2025-04-07T16:40:03.211Z + 2025-04-07T16:42:04.023Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.alpaca_chat.html - 2025-04-07T16:40:02.557Z + 2025-04-07T16:42:03.367Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.alpaca_instruct.html - 2025-04-07T16:40:02.558Z + 2025-04-07T16:42:03.369Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.kto.chatml.html - 2025-04-07T16:40:02.670Z + 2025-04-07T16:42:03.481Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.integrations.html - 2025-04-07T16:40:03.131Z + 2025-04-07T16:42:03.943Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.trl.html - 2025-04-07T16:40:03.114Z + 2025-04-07T16:42:03.926Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_tokenizers.html - 2025-04-07T16:40:02.159Z + 2025-04-07T16:42:02.913Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.data.sft.html - 2025-04-07T16:40:03.061Z + 2025-04-07T16:42:03.874Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schedulers.html - 2025-04-07T16:40:03.027Z + 2025-04-07T16:42:03.840Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.chat_templates.html - 2025-04-07T16:40:02.961Z + 2025-04-07T16:42:03.774Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.models.html - 2025-04-07T16:40:02.944Z + 2025-04-07T16:42:03.757Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.dpo.chatml.html - 2025-04-07T16:40:02.650Z + 2025-04-07T16:42:03.461Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.distributed.html - 2025-04-07T16:40:03.048Z + 2025-04-07T16:42:03.860Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.utils.html - 2025-04-07T16:40:02.887Z + 2025-04-07T16:42:03.699Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.utils.html - 2025-04-07T16:40:03.143Z + 2025-04-07T16:42:03.955Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.llama_expand_mask.html - 2025-04-07T16:40:02.857Z + 2025-04-07T16:42:03.669Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/common.datasets.html - 2025-04-07T16:40:03.247Z + 2025-04-07T16:42:04.060Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/logging_config.html - 2025-04-07T16:40:02.164Z + 2025-04-07T16:42:02.920Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/kernels.quantize.html - 2025-04-07T16:40:02.804Z + 2025-04-07T16:42:03.617Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.llama_patch_multipack.html - 2025-04-07T16:40:02.890Z + 2025-04-07T16:42:03.702Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.model.html - 2025-04-07T16:40:03.079Z + 2025-04-07T16:42:03.892Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.stablelm_attn_hijack_flash.html - 2025-04-07T16:40:02.895Z + 2025-04-07T16:42:03.707Z 
https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.mixtral.html - 2025-04-07T16:40:02.916Z + 2025-04-07T16:42:03.728Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.tokenization.html - 2025-04-07T16:40:02.951Z + 2025-04-07T16:42:03.764Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/integrations.kd.trainer.html - 2025-04-07T16:40:03.218Z + 2025-04-07T16:42:04.030Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.datasets.html - 2025-04-07T16:40:03.102Z + 2025-04-07T16:42:03.914Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.collators.core.html - 2025-04-07T16:40:03.250Z + 2025-04-07T16:42:04.062Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.btlm_attn_hijack_flash.html - 2025-04-07T16:40:02.888Z + 2025-04-07T16:42:03.700Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.optimizers.adopt.html - 2025-04-07T16:40:03.058Z + 2025-04-07T16:42:03.871Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.input_output.html - 2025-04-07T16:40:02.602Z + 2025-04-07T16:42:03.413Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/index.html - 2025-04-07T16:40:02.026Z + 2025-04-07T16:42:02.778Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.cloud.modal_.html - 2025-04-07T16:40:02.486Z + 2025-04-07T16:42:03.297Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.dpo.llama3.html - 2025-04-07T16:40:02.640Z + 2025-04-07T16:42:03.451Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.train.html - 2025-04-07T16:40:02.344Z + 2025-04-07T16:42:03.114Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.trainer_builder.html - 2025-04-07T16:40:02.180Z + 2025-04-07T16:42:02.943Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.callbacks.perplexity.html - 2025-04-07T16:40:03.299Z + 2025-04-07T16:42:04.110Z https://axolotl-ai-cloud.github.io/axolotl/docs/getting-started.html - 2025-04-07T16:39:31.280Z + 2025-04-07T16:41:27.521Z - https://axolotl-ai-cloud.github.io/axolotl/docs/inference.html - 2025-04-07T16:39:31.283Z - - - https://axolotl-ai-cloud.github.io/axolotl/docs/multipack.html - 2025-04-07T16:39:31.283Z - - - https://axolotl-ai-cloud.github.io/axolotl/docs/sequence_parallelism.html - 2025-04-07T16:39:31.283Z - - - https://axolotl-ai-cloud.github.io/axolotl/docs/multi-node.html - 2025-04-07T16:39:31.283Z - - - https://axolotl-ai-cloud.github.io/axolotl/FAQS.html - 2025-04-07T16:39:31.278Z - - - https://axolotl-ai-cloud.github.io/axolotl/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html - 2025-04-07T16:39:31.299Z - - - https://axolotl-ai-cloud.github.io/axolotl/index.html - 2025-04-07T16:39:31.295Z - - - https://axolotl-ai-cloud.github.io/axolotl/src/axolotl/integrations/LICENSE.html - 2025-04-07T16:39:31.299Z - - - https://axolotl-ai-cloud.github.io/axolotl/TODO.html - 2025-04-07T16:39:31.278Z - - - https://axolotl-ai-cloud.github.io/axolotl/docs/lr_groups.html - 2025-04-07T16:39:31.283Z - - - https://axolotl-ai-cloud.github.io/axolotl/docs/debugging.html - 2025-04-07T16:39:31.280Z - - - https://axolotl-ai-cloud.github.io/axolotl/docs/faq.html - 2025-04-07T16:39:31.280Z + https://axolotl-ai-cloud.github.io/axolotl/docs/dataset_loading.html + 2025-04-07T16:41:27.521Z https://axolotl-ai-cloud.github.io/axolotl/docs/batch_vs_grad.html - 2025-04-07T16:39:31.279Z + 2025-04-07T16:41:27.520Z + + + https://axolotl-ai-cloud.github.io/axolotl/docs/faq.html + 2025-04-07T16:41:27.521Z + + + https://axolotl-ai-cloud.github.io/axolotl/docs/debugging.html + 
2025-04-07T16:41:27.521Z + + + https://axolotl-ai-cloud.github.io/axolotl/docs/lr_groups.html + 2025-04-07T16:41:27.524Z + + + https://axolotl-ai-cloud.github.io/axolotl/TODO.html + 2025-04-07T16:41:27.519Z + + + https://axolotl-ai-cloud.github.io/axolotl/src/axolotl/integrations/LICENSE.html + 2025-04-07T16:41:27.540Z + + + https://axolotl-ai-cloud.github.io/axolotl/index.html + 2025-04-07T16:41:27.537Z + + + https://axolotl-ai-cloud.github.io/axolotl/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html + 2025-04-07T16:41:27.540Z + + + https://axolotl-ai-cloud.github.io/axolotl/FAQS.html + 2025-04-07T16:41:27.519Z + + + https://axolotl-ai-cloud.github.io/axolotl/docs/multi-node.html + 2025-04-07T16:41:27.524Z + + + https://axolotl-ai-cloud.github.io/axolotl/docs/sequence_parallelism.html + 2025-04-07T16:41:27.524Z + + + https://axolotl-ai-cloud.github.io/axolotl/docs/multipack.html + 2025-04-07T16:41:27.524Z + + + https://axolotl-ai-cloud.github.io/axolotl/docs/inference.html + 2025-04-07T16:41:27.524Z https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html - 2025-04-07T16:39:31.283Z + 2025-04-07T16:41:27.524Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.lora_embeddings.html - 2025-04-07T16:40:02.970Z + 2025-04-07T16:42:03.782Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/kernels.utils.html - 2025-04-07T16:40:02.806Z + 2025-04-07T16:42:03.618Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.chat_template.html - 2025-04-07T16:40:02.543Z + 2025-04-07T16:42:03.354Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/convert.html - 2025-04-07T16:40:02.117Z + 2025-04-07T16:42:02.870Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/common.const.html - 2025-04-07T16:40:03.231Z + 2025-04-07T16:42:04.043Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.cloud.base.html - 2025-04-07T16:40:02.480Z + 2025-04-07T16:42:03.291Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.relora.html - 2025-04-07T16:40:02.855Z + 2025-04-07T16:42:03.667Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.lora.html - 2025-04-07T16:40:02.966Z + 2025-04-07T16:42:03.779Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.merge_lora.html - 2025-04-07T16:40:02.415Z + 2025-04-07T16:42:03.225Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.bradley_terry.llama3.html - 2025-04-07T16:40:02.695Z + 2025-04-07T16:42:03.507Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.merge_sharded_fsdp_weights.html - 2025-04-07T16:40:02.427Z + 2025-04-07T16:42:03.237Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/integrations.spectrum.args.html - 2025-04-07T16:40:03.228Z + 2025-04-07T16:42:04.040Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/models.mamba.modeling_mamba.html - 2025-04-07T16:40:03.248Z + 2025-04-07T16:42:04.061Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/common.architectures.html - 2025-04-07T16:40:03.229Z + 2025-04-07T16:42:04.042Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.trainer.html - 2025-04-07T16:40:03.003Z + 2025-04-07T16:42:03.815Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.callbacks.comet_.html - 2025-04-07T16:40:03.311Z + 2025-04-07T16:42:04.123Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.vllm_serve.html - 2025-04-07T16:40:02.477Z + 2025-04-07T16:42:03.287Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.multimodal.html - 2025-04-07T16:40:03.119Z + 2025-04-07T16:42:03.931Z 
https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.gradient_checkpointing.unsloth.html - 2025-04-07T16:40:03.065Z + 2025-04-07T16:42:03.877Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.trainers.base.html - 2025-04-07T16:40:02.500Z + 2025-04-07T16:42:03.311Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.unsloth_.html - 2025-04-07T16:40:02.906Z + 2025-04-07T16:42:03.718Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.samplers.multipack.html - 2025-04-07T16:40:03.292Z + 2025-04-07T16:42:04.104Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.callbacks.profiler.html - 2025-04-07T16:40:03.302Z + 2025-04-07T16:42:04.114Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/integrations.lm_eval.args.html - 2025-04-07T16:40:03.225Z + 2025-04-07T16:42:04.037Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.data.pretraining.html - 2025-04-07T16:40:03.060Z + 2025-04-07T16:42:03.872Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/evaluate.html - 2025-04-07T16:40:02.097Z + 2025-04-07T16:42:02.849Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.dict.html - 2025-04-07T16:40:03.051Z + 2025-04-07T16:42:03.863Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.utils.html - 2025-04-07T16:40:02.472Z + 2025-04-07T16:42:03.283Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.pygmalion.html - 2025-04-07T16:40:02.624Z + 2025-04-07T16:42:03.435Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.training_args.html - 2025-04-07T16:40:02.265Z + 2025-04-07T16:42:03.035Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.inference.html - 2025-04-07T16:40:02.407Z + 2025-04-07T16:42:03.217Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/kernels.lora.html - 2025-04-07T16:40:02.776Z + 2025-04-07T16:42:03.589Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.evaluate.html - 2025-04-07T16:40:02.352Z + 2025-04-07T16:42:03.122Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.collators.batching.html - 2025-04-07T16:40:03.276Z + 2025-04-07T16:42:04.088Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.completion.html - 2025-04-07T16:40:02.596Z + 2025-04-07T16:42:03.407Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.dpo.zephyr.html - 2025-04-07T16:40:02.651Z + 2025-04-07T16:42:03.463Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.metharme.html - 2025-04-07T16:40:02.613Z + 2025-04-07T16:42:03.425Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.orpo.chat_template.html - 2025-04-07T16:40:02.692Z + 2025-04-07T16:42:03.504Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.alpaca_w_system.html - 2025-04-07T16:40:02.570Z + 2025-04-07T16:42:03.381Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.model_shard_quant.html - 2025-04-07T16:40:02.975Z + 2025-04-07T16:42:03.787Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.config.html - 2025-04-07T16:40:02.393Z + 2025-04-07T16:42:03.192Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.enums.html - 2025-04-07T16:40:03.137Z + 2025-04-07T16:42:03.949Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.preprocess.html - 2025-04-07T16:40:02.435Z + 2025-04-07T16:42:03.245Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.chat.messages.html - 2025-04-07T16:40:02.287Z + 2025-04-07T16:42:03.058Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.dpo.chat_template.html - 
2025-04-07T16:40:02.630Z + 2025-04-07T16:42:03.441Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.peft.html - 2025-04-07T16:40:03.110Z + 2025-04-07T16:42:03.922Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/train.html - 2025-04-07T16:40:02.086Z + 2025-04-07T16:42:02.839Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.messages.chat.html - 2025-04-07T16:40:02.628Z + 2025-04-07T16:42:03.440Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.orcamini.html - 2025-04-07T16:40:02.617Z + 2025-04-07T16:42:03.429Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.collators.mm_chat.html - 2025-04-07T16:40:03.284Z + 2025-04-07T16:42:04.096Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.kto.llama3.html - 2025-04-07T16:40:02.662Z + 2025-04-07T16:42:03.473Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.attention.mllama.html - 2025-04-07T16:40:02.913Z + 2025-04-07T16:42:03.725Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.checks.html - 2025-04-07T16:40:02.375Z + 2025-04-07T16:42:03.161Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.transformers_fa_utils.html - 2025-04-07T16:40:02.905Z + 2025-04-07T16:42:03.717Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.llama_attn_hijack_xformers.html - 2025-04-07T16:40:02.832Z + 2025-04-07T16:42:03.645Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.trainers.dpo.trainer.html - 2025-04-07T16:40:02.523Z + 2025-04-07T16:42:03.334Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.user_defined.html - 2025-04-07T16:40:02.578Z + 2025-04-07T16:42:03.389Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.args.html - 2025-04-07T16:40:02.369Z + 2025-04-07T16:42:03.150Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.llama2_chat.html - 2025-04-07T16:40:02.590Z + 2025-04-07T16:42:03.401Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.config.html - 2025-04-07T16:40:03.073Z + 2025-04-07T16:42:03.885Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.trainers.grpo.trainer.html - 2025-04-07T16:40:02.526Z + 2025-04-07T16:42:03.338Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.chat.format.chatml.html - 2025-04-07T16:40:02.289Z + 2025-04-07T16:42:03.059Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.lora_kernels.html - 2025-04-07T16:40:02.879Z + 2025-04-07T16:42:03.691Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.base.html - 2025-04-07T16:40:02.528Z + 2025-04-07T16:42:03.339Z https://axolotl-ai-cloud.github.io/axolotl/docs/rlhf.html - 2025-04-07T16:39:31.283Z + 2025-04-07T16:41:27.524Z https://axolotl-ai-cloud.github.io/axolotl/docs/cli.html - 2025-04-07T16:39:31.279Z + 2025-04-07T16:41:27.520Z https://axolotl-ai-cloud.github.io/axolotl/docs/unsloth.html - 2025-04-07T16:39:31.283Z + 2025-04-07T16:41:27.524Z https://axolotl-ai-cloud.github.io/axolotl/docs/fsdp_qlora.html - 2025-04-07T16:39:31.280Z + 2025-04-07T16:41:27.521Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset_preprocessing.html - 2025-04-07T16:39:31.280Z + 2025-04-07T16:41:27.521Z https://axolotl-ai-cloud.github.io/axolotl/docs/custom_integrations.html - 2025-04-07T16:39:31.279Z + 2025-04-07T16:41:27.521Z https://axolotl-ai-cloud.github.io/axolotl/docs/mac.html - 2025-04-07T16:39:31.283Z + 2025-04-07T16:41:27.524Z https://axolotl-ai-cloud.github.io/axolotl/docs/docker.html - 2025-04-07T16:39:31.280Z + 
2025-04-07T16:41:27.521Z https://axolotl-ai-cloud.github.io/axolotl/docs/ray-integration.html - 2025-04-07T16:39:31.283Z + 2025-04-07T16:41:27.524Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/index.html - 2025-04-07T16:39:31.279Z + 2025-04-07T16:41:27.521Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/conversation.html - 2025-04-07T16:39:31.279Z + 2025-04-07T16:41:27.521Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/pretraining.html - 2025-04-07T16:39:31.280Z + 2025-04-07T16:41:27.521Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/inst_tune.html - 2025-04-07T16:39:31.279Z + 2025-04-07T16:41:27.521Z diff --git a/src/axolotl/integrations/LICENSE.html b/src/axolotl/integrations/LICENSE.html index 7f9b7f868..e48e2845a 100644 --- a/src/axolotl/integrations/LICENSE.html +++ b/src/axolotl/integrations/LICENSE.html @@ -292,6 +292,12 @@ ul.task-list li input[type="checkbox"] { LoRA Optimizations + + diff --git a/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html b/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html index d8e86c51b..6780604f6 100644 --- a/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html +++ b/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html @@ -292,6 +292,12 @@ ul.task-list li input[type="checkbox"] { LoRA Optimizations + +