From 5c57c40993a3184d02d0472efd75cf5be0057910 Mon Sep 17 00:00:00 2001
From: Quarto GHA Workflow Runner
Date: Mon, 31 Mar 2025 13:16:19 +0000
Subject: [PATCH] Built site for gh-pages

---
 .nojekyll | 2 +-
 FAQS.html | 6 +
 TODO.html | 6 +
 docs/amd_hpc.html | 6 +
 docs/api/cli.args.html | 6 +
 docs/api/cli.checks.html | 6 +
 docs/api/cli.cloud.base.html | 6 +
 docs/api/cli.cloud.modal_.html | 6 +
 docs/api/cli.config.html | 6 +
 docs/api/cli.evaluate.html | 6 +
 docs/api/cli.inference.html | 6 +
 docs/api/cli.main.html | 6 +
 docs/api/cli.merge_lora.html | 6 +
 docs/api/cli.merge_sharded_fsdp_weights.html | 6 +
 docs/api/cli.preprocess.html | 6 +
 docs/api/cli.sweeps.html | 6 +
 docs/api/cli.train.html | 6 +
 docs/api/cli.utils.html | 6 +
 docs/api/common.architectures.html | 6 +
 docs/api/common.const.html | 6 +
 docs/api/common.datasets.html | 6 +
 docs/api/convert.html | 6 +
 docs/api/core.chat.format.chatml.html | 6 +
 docs/api/core.chat.format.llama3x.html | 6 +
 docs/api/core.chat.format.shared.html | 6 +
 docs/api/core.chat.messages.html | 6 +
 docs/api/core.datasets.chat.html | 6 +
 ...core.datasets.transforms.chat_builder.html | 6 +
 docs/api/core.trainer_builder.html | 6 +
 docs/api/core.trainers.base.html | 6 +
 docs/api/core.trainers.dpo.trainer.html | 6 +
 docs/api/core.trainers.grpo.trainer.html | 6 +
 docs/api/core.trainers.trl.html | 6 +
 docs/api/core.training_args.html | 6 +
 docs/api/datasets.html | 6 +
 docs/api/evaluate.html | 6 +
 docs/api/index.html | 6 +
 docs/api/integrations.base.html | 6 +
 .../integrations.cut_cross_entropy.args.html | 6 +
 docs/api/integrations.grokfast.optimizer.html | 6 +
 docs/api/integrations.kd.trainer.html | 6 +
 docs/api/integrations.liger.args.html | 6 +
 docs/api/integrations.lm_eval.args.html | 6 +
 docs/api/integrations.spectrum.args.html | 6 +
 docs/api/kernels.geglu.html | 6 +
 docs/api/kernels.lora.html | 6 +
 docs/api/kernels.quantize.html | 6 +
 docs/api/kernels.swiglu.html | 6 +
 docs/api/kernels.utils.html | 6 +
 docs/api/logging_config.html | 6 +
 docs/api/models.mamba.modeling_mamba.html | 6 +
 docs/api/monkeypatch.attention.mllama.html | 6 +
 .../monkeypatch.btlm_attn_hijack_flash.html | 6 +
 ...onkeypatch.data.batch_dataset_fetcher.html | 6 +
 .../monkeypatch.llama_attn_hijack_flash.html | 6 +
 ...onkeypatch.llama_attn_hijack_xformers.html | 6 +
 docs/api/monkeypatch.llama_expand_mask.html | 6 +
 .../monkeypatch.llama_patch_multipack.html | 6 +
 docs/api/monkeypatch.lora_kernels.html | 6 +
 ...monkeypatch.mistral_attn_hijack_flash.html | 6 +
 docs/api/monkeypatch.mixtral.html | 6 +
 docs/api/monkeypatch.multipack.html | 6 +
 docs/api/monkeypatch.relora.html | 6 +
 ...onkeypatch.stablelm_attn_hijack_flash.html | 6 +
 docs/api/monkeypatch.trainer_fsdp_optim.html | 6 +
 .../monkeypatch.transformers_fa_utils.html | 6 +
 docs/api/monkeypatch.unsloth_.html | 6 +
 docs/api/monkeypatch.utils.html | 6 +
 docs/api/prompt_strategies.alpaca_chat.html | 6 +
 .../prompt_strategies.alpaca_instruct.html | 6 +
 .../prompt_strategies.alpaca_w_system.html | 6 +
 docs/api/prompt_strategies.base.html | 6 +
 ...rompt_strategies.bradley_terry.llama3.html | 6 +
 docs/api/prompt_strategies.chat_template.html | 6 +
 docs/api/prompt_strategies.completion.html | 6 +
 .../prompt_strategies.dpo.chat_template.html | 6 +
 docs/api/prompt_strategies.dpo.chatml.html | 6 +
 docs/api/prompt_strategies.dpo.llama3.html | 6 +
 .../prompt_strategies.dpo.passthrough.html | 6 +
 .../prompt_strategies.dpo.user_defined.html | 6 +
 docs/api/prompt_strategies.dpo.zephyr.html | 6 +
 docs/api/prompt_strategies.input_output.html | 6 +
 docs/api/prompt_strategies.kto.chatml.html | 6 +
 docs/api/prompt_strategies.kto.llama3.html | 6 +
 .../prompt_strategies.kto.user_defined.html | 6 +
 docs/api/prompt_strategies.llama2_chat.html | 6 +
 docs/api/prompt_strategies.messages.chat.html | 6 +
 docs/api/prompt_strategies.metharme.html | 6 +
 docs/api/prompt_strategies.orcamini.html | 6 +
 .../prompt_strategies.orpo.chat_template.html | 6 +
 docs/api/prompt_strategies.pygmalion.html | 6 +
 ...prompt_strategies.stepwise_supervised.html | 6 +
 docs/api/prompt_strategies.user_defined.html | 6 +
 docs/api/prompt_tokenizers.html | 6 +
 docs/api/train.html | 6 +
 docs/api/utils.bench.html | 6 +
 docs/api/utils.callbacks.comet_.html | 6 +
 docs/api/utils.callbacks.lisa.html | 6 +
 docs/api/utils.callbacks.mlflow_.html | 6 +
 docs/api/utils.callbacks.perplexity.html | 6 +
 docs/api/utils.callbacks.profiler.html | 6 +
 docs/api/utils.chat_templates.html | 6 +
 docs/api/utils.collators.batching.html | 6 +
 docs/api/utils.collators.core.html | 6 +
 docs/api/utils.collators.mamba.html | 6 +
 docs/api/utils.collators.mm_chat.html | 6 +
 docs/api/utils.data.pretraining.html | 6 +
 docs/api/utils.data.sft.html | 6 +
 docs/api/utils.dict.html | 6 +
 docs/api/utils.distributed.html | 6 +
 docs/api/utils.freeze.html | 6 +
 .../utils.gradient_checkpointing.unsloth.html | 6 +
 docs/api/utils.lora.html | 6 +
 docs/api/utils.lora_embeddings.html | 6 +
 docs/api/utils.model_shard_quant.html | 6 +
 docs/api/utils.models.html | 6 +
 docs/api/utils.optimizers.adopt.html | 6 +
 docs/api/utils.samplers.multipack.html | 6 +
 docs/api/utils.schedulers.html | 6 +
 docs/api/utils.schemas.config.html | 6 +
 docs/api/utils.schemas.datasets.html | 6 +
 docs/api/utils.schemas.enums.html | 6 +
 docs/api/utils.schemas.integrations.html | 6 +
 docs/api/utils.schemas.model.html | 6 +
 docs/api/utils.schemas.multimodal.html | 6 +
 docs/api/utils.schemas.peft.html | 6 +
 docs/api/utils.schemas.training.html | 6 +
 docs/api/utils.schemas.trl.html | 6 +
 docs/api/utils.schemas.utils.html | 6 +
 docs/api/utils.tokenization.html | 6 +
 docs/api/utils.trainer.html | 6 +
 docs/batch_vs_grad.html | 6 +
 docs/cli.html | 6 +
 docs/config.html | 31 +-
 docs/custom_integrations.html | 6 +
 docs/dataset-formats/conversation.html | 6 +
 docs/dataset-formats/index.html | 6 +
 docs/dataset-formats/inst_tune.html | 6 +
 docs/dataset-formats/pretraining.html | 6 +
 docs/dataset-formats/stepwise_supervised.html | 6 +
 docs/dataset-formats/template_free.html | 6 +
 docs/dataset-formats/tokenized.html | 6 +
 docs/dataset_preprocessing.html | 6 +
 docs/debugging.html | 6 +
 docs/docker.html | 6 +
 docs/faq.html | 6 +
 docs/fsdp_qlora.html | 6 +
 docs/getting-started.html | 6 +
 docs/inference.html | 6 +
 docs/input_output.html | 6 +
 docs/installation.html | 6 +
 docs/lora_optims.html | 6 +
 docs/lr_groups.html | 6 +
 docs/mac.html | 6 +
 docs/multi-gpu.html | 293 ++++++++-------
 docs/multi-node.html | 6 +
 docs/multimodal.html | 6 +
 docs/multipack.html | 6 +
 docs/nccl.html | 6 +
 docs/ray-integration.html | 6 +
 docs/reward_modelling.html | 6 +
 docs/rlhf.html | 6 +
 docs/sequence_parallelism.html | 31 +-
 docs/torchao.html | 6 +
 docs/unsloth.html | 6 +
 .../colab-axolotl-example.html | 6 +
 index.html | 6 +
 search.json | 79 +++-
 sitemap.xml | 336 +++++++++---------
 src/axolotl/integrations/LICENSE.html | 6 +
 .../cut_cross_entropy/ACKNOWLEDGEMENTS.html | 6 +
 171 files changed, 1435 insertions(+), 327 deletions(-)

diff --git a/.nojekyll b/.nojekyll
index 9626b1ed2..cf08ddb8d 100644
--- a/.nojekyll
+++ b/.nojekyll
@@ -1 +1 @@
-ebdd0027
\ No newline at end of file
+39edd9fd
\ No newline at end of file
diff --git a/docs/config.html b/docs/config.html
index f0ef0002a..4c136cf07 100644
--- a/docs/config.html
+++ b/docs/config.html
@@ -1112,21 +1118,24 @@
 # subsequences, or set to 4 to split into four equal-sized subsequences.
 # See https://axolotl-ai-cloud.github.io/axolotl/docs/sequence_parallelism.html for more details.
 sequence_parallel_degree:
-
-# Path to torch distx for optim 'adamw_anyprecision'
-torchdistx_path:
+# Optional; strides across the key dimension. Larger values use more memory but should make training faster.
+# Must evenly divide the number of KV heads in your model.
+heads_k_stride: 1
 
-# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize
-pretraining_dataset:
+# Path to torch distx for optim 'adamw_anyprecision'
+torchdistx_path:
 
-# Debug mode
-debug:
+# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize
+pretraining_dataset:
 
-# Seed
-seed:
+# Debug mode
+debug:
 
-# Allow overwrite yml config using from cli
-strict:
+# Seed
+seed:
+
+# Allow overwrite yml config using from cli
+strict:
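The hunk above adds heads_k_stride to the sequence-parallelism block of the config reference and shifts the options that follow it. As a minimal sketch of how the new key combines with the existing ones in a training config (the model, dataset, and sequence length below are illustrative placeholders, not values taken from this patch):

    base_model: NousResearch/Meta-Llama-3-8B  # placeholder; any FlashAttention-compatible model
    datasets:
      - path: tatsu-lab/alpaca                # placeholder dataset
        type: alpaca
    sequence_len: 32768
    flash_attention: true                     # required for sequence parallelism
    sequence_parallel_degree: 4               # split each sequence into 4 parts, one per GPU
    heads_k_stride: 1                         # must evenly divide the model's KV head count

With sequence_parallel_degree: 4, each GPU holds a 32768 / 4 = 8192-token slice of every sequence, which is what relieves the single-sequence OOM pressure described in the multi-gpu page below.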
diff --git a/docs/multi-gpu.html b/docs/multi-gpu.html
index a7ccd1af7..efb88bbbf 100644
--- a/docs/multi-gpu.html
+++ b/docs/multi-gpu.html
b/docs/multi-gpu.html @@ -390,6 +390,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin Custom Integrations + + @@ -442,16 +448,19 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin
  • 3 FSDP
  • -
  • 4 Performance Optimization +
  • 4 Sequence parallelism
  • -
  • 5 Troubleshooting +
  • 5 Performance Optimization
  • +
  • 6 Troubleshooting +
  • @@ -485,6 +494,7 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin @@ -527,26 +537,42 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin fsdp_state_dict_type: FULL_STATE_DICT fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer -
    -

    3.2 FSDP + QLoRA

    +
    +
    +

    4 Sequence parallelism

    +

We support sequence parallelism (SP) via the +ring-flash-attention project. This +splits sequences across GPUs, which is useful when a +single long sequence would otherwise cause OOM errors during training.

    +

First, install ring-flash-attn; we recommend pip install axolotl[ring-flash-attn], +or, from source, pip install .[ring-flash-attn].

    +

    Your Axolotl YAML config should contain the following lines:

    +
    sequence_parallel_degree: 4  # Split each sequence into 4 parts, one per GPU
    +flash_attention: true  # Required with sequence parallelism
    +
+# Optional; strides across the key dimension. Larger values use more memory but should make training faster.
    +heads_k_stride: 1
    +

    See our dedicated guide for more details.
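As a quick usage sketch (hedged; this assumes a single node with 4 GPUs, the standard accelerate launcher, and that your config lives at config.yaml):

accelerate launch --num_processes 4 -m axolotl.cli.train config.yaml

With sequence_parallel_degree: 4 and 4 processes, each GPU holds one quarter of every sequence; launching 8 processes with the same degree would instead form 2 sequence-parallel groups running data-parallel to each other.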

    +
    +

    4.1 FSDP + QLoRA

    For combining FSDP with QLoRA, see our dedicated guide.
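For orientation only, a minimal sketch of the key settings such a config combines, reusing the FSDP keys shown above (the guide is authoritative; fsdp_transformer_layer_cls_to_wrap depends on your model architecture):

adapter: qlora
load_in_4bit: true
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_offload_params: true
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT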

    -
    -

    4 Performance Optimization

    -
    -

    4.1 Liger Kernel Integration

    +
    +

    5 Performance Optimization

    +
    +

    5.1 Liger Kernel Integration

    Please see docs for more info.
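As a rough sketch of what enabling the integration looks like (key names should be verified against the linked docs for your Axolotl version):

plugins:
  - axolotl.integrations.liger.LigerPlugin
liger_rope: true
liger_rms_norm: true
liger_glu_activation: true
liger_fused_linear_cross_entropy: true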

    -
    -

    5 Troubleshooting

    -
    -

    5.1 NCCL Issues

    +
    +

    6 Troubleshooting

    +
    +

    6.1 NCCL Issues

    For NCCL-related problems, see our NCCL troubleshooting guide.
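A generic first debugging step (standard NCCL environment variables, not specific to Axolotl) is to rerun with verbose NCCL logging and inspect the output around the failing collective:

NCCL_DEBUG=INFO accelerate launch -m axolotl.cli.train config.yaml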

    -
    -

    5.2 Common Problems

    +
    +

    6.2 Common Problems

    @@ -1044,110 +1070,133 @@ window.document.addEventListener("DOMContentLoaded", function (event) { } });
    diff --git a/docs/multi-node.html b/docs/multi-node.html index 86beb655f..b62dce622 100644 --- a/docs/multi-node.html +++ b/docs/multi-node.html @@ -391,6 +391,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin Custom Integrations
    + + diff --git a/docs/multimodal.html b/docs/multimodal.html index 310be4e4f..e79e583a4 100644 --- a/docs/multimodal.html +++ b/docs/multimodal.html @@ -390,6 +390,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin Custom Integrations + + diff --git a/docs/multipack.html b/docs/multipack.html index d988a83fd..a73609374 100644 --- a/docs/multipack.html +++ b/docs/multipack.html @@ -357,6 +357,12 @@ ul.task-list li input[type="checkbox"] { Custom Integrations + + diff --git a/docs/nccl.html b/docs/nccl.html index b6165033f..af8b62c2f 100644 --- a/docs/nccl.html +++ b/docs/nccl.html @@ -391,6 +391,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin Custom Integrations + + diff --git a/docs/ray-integration.html b/docs/ray-integration.html index 852b4d44b..2b1712344 100644 --- a/docs/ray-integration.html +++ b/docs/ray-integration.html @@ -391,6 +391,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin Custom Integrations + + diff --git a/docs/reward_modelling.html b/docs/reward_modelling.html index 60c5e92c6..b7e1ac768 100644 --- a/docs/reward_modelling.html +++ b/docs/reward_modelling.html @@ -391,6 +391,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin Custom Integrations + + diff --git a/docs/rlhf.html b/docs/rlhf.html index e1410bc4f..9d6d92839 100644 --- a/docs/rlhf.html +++ b/docs/rlhf.html @@ -391,6 +391,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin Custom Integrations + + diff --git a/docs/sequence_parallelism.html b/docs/sequence_parallelism.html index 0d60188c1..7a819ae66 100644 --- a/docs/sequence_parallelism.html +++ b/docs/sequence_parallelism.html @@ -130,7 +130,7 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin - + @@ -391,6 +391,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin Custom Integrations + + @@ -450,7 +456,7 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin
    -
    +

    Sequence Parallelism

    @@ -493,7 +499,9 @@ through a ring communication pattern.

    Configuration

    To enable sequence parallelism, add the following to your configuration file:

    # Set to a divisor (> 1) of the number of GPUs available
    -sequence_parallel_degree: 4  # Split sequences across 4 GPUs
    +sequence_parallel_degree: 4 # Split sequences across 4 GPUs +# Optional; strides across the key dimension. Larger values use more memory but should make training faster. +heads_k_stride: 1

    The sequence_parallel_degree should be a divisor of the total number of GPUs. For example:

    • With 8 GPUs, valid values would be 2, 4, or 8
    • @@ -531,12 +539,17 @@ through a ring communication pattern.

    Example

    -
    # Example config with sequence parallelism
    -base_model: meta-llama/Llama-3-8B-Instruct
    -sequence_len: 8192
    -sequence_parallel_degree: 2  # Split each sequence into 4 parts
    -flash_attention: true  # Required with sequence parallelism
    -...
    +
    base_model: meta-llama/Llama-3-8B-Instruct
    +sequence_len: 8192
    +
    +...
    +
    +sequence_parallel_degree: 4  # Split each sequence into 4 parts, one per GPU
    +flash_attention: true  # Required with sequence parallelism
    +# Optional; strides across the key dimension. Larger values use more memory but should make training faster.
    +heads_k_stride: 1
    +
    +...

This will train the Llama 3 8B model with an 8K context length, with each sequence split into 4 subsequences of length 2048 (8192 / 4), one per GPU across 4 GPUs.

    diff --git a/docs/torchao.html b/docs/torchao.html index f66c89b5c..3d176dde5 100644 --- a/docs/torchao.html +++ b/docs/torchao.html @@ -391,6 +391,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin Custom Integrations + + diff --git a/docs/unsloth.html b/docs/unsloth.html index ad87fd67d..f5870bde7 100644 --- a/docs/unsloth.html +++ b/docs/unsloth.html @@ -391,6 +391,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin Custom Integrations + + diff --git a/examples/colab-notebooks/colab-axolotl-example.html b/examples/colab-notebooks/colab-axolotl-example.html index 6bb7bec71..7e7c24e0c 100644 --- a/examples/colab-notebooks/colab-axolotl-example.html +++ b/examples/colab-notebooks/colab-axolotl-example.html @@ -419,6 +419,12 @@ window.Quarto = { Custom Integrations + + diff --git a/index.html b/index.html index 186e78e5c..bdbadf307 100644 --- a/index.html +++ b/index.html @@ -390,6 +390,12 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin Custom Integrations + + diff --git a/search.json b/search.json index e6e3e9a31..af982f7a4 100644 --- a/search.json +++ b/search.json @@ -152,7 +152,7 @@ "href": "docs/config.html", "title": "Config Reference", "section": "", - "text": "# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files\n# This can also be a relative path to a model on disk\nbase_model: ./llama-7b-hf\n# You can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, etc)\nbase_model_ignore_patterns:\n# If the base_model repo on hf hub doesn't include configuration .json files,\n# You can set that here, or leave this empty to default to base_model\nbase_model_config: ./llama-7b-hf\n# You can specify to choose a specific model revision from huggingface hub\nrevision_of_model:\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one defined in the base model\ntokenizer_config:\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too\nmodel_type: AutoModelForCausalLM\n# Corresponding tokenizer for the model AutoTokenizer is a good choice\ntokenizer_type: AutoTokenizer\n# Trust remote code for untrusted source\ntrust_remote_code:\n# use_fast option for tokenizer loading from_pretrained, default to True\ntokenizer_use_fast:\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy:\n# Resize the model embeddings when new tokens are added to multiples of 32\n# This is reported to improve training speed on some models\nresize_token_embeddings_to_32x:\n# Optional[bool] Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.\nshrink_embeddings:\n# Whether to load the model with randomly initialized weights. 
Useful for\n# pre-training a model from scratch or debugging purposes.\nrandom_init_weights:\n\n# (Internal use only)\n# Used to identify which the model is based on\nis_falcon_derived_model:\nis_llama_derived_model:\nis_qwen_derived_model:\n# Please note that if you set this to true, `padding_side` will be set to \"left\" by default\nis_mistral_derived_model:\n\n# optional overrides to the base model configuration\noverrides_of_model_config:\n # RoPE Scaling https://github.com/huggingface/transformers/pull/24653\n rope_scaling:\n type: # linear | dynamic\n factor: # float\n\n# optional overrides the base model loading from_pretrained\noverrides_of_model_kwargs:\n # use_cache: False\n\n# optional overrides to the bnb 4bit quantization configuration\n# https://huggingface.co/docs/transformers/main/main_classes/quantization#transformers.BitsAndBytesConfig\nbnb_config_kwargs:\n # These are default values\n llm_int8_has_fp16_weight: false\n bnb_4bit_quant_type: nf4\n bnb_4bit_use_double_quant: true\n\n\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: true\n\n# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer\nload_in_8bit: true\n# Use bitsandbytes 4 bit\nload_in_4bit:\n\n# Use CUDA bf16\nbf16: true # bool or 'full' for `bf16_full_eval`. require >=ampere\n# Use CUDA fp16\nfp16: true\n# Use CUDA tf32\ntf32: true # require >=ampere\n\n# No AMP (automatic mixed precision)\nbfloat16: true # require >=ampere\nfloat16: true\n\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in gigabytes); default: unset\ngpu_memory_limit: 20GiB\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge\nlora_on_cpu: true\n\n# List[str]. Add plugins to extend the pipeline.\n# See `src/axolotl/integrations` for the available plugins or doc below for more details.\n# https://axolotl-ai-cloud.github.io/axolotl/docs/custom_integrations.html\nplugins:\n # - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin\n\n# A list of one or more datasets to finetune the model with\ndatasets:\n # HuggingFace dataset repo | s3://,gs:// path | \"json\" for local dataset, make sure to fill data_files\n - path: vicgalle/alpaca-gpt4\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn>\n ds_type: # Optional[str] (json|arrow|parquet|text|csv) defines the datatype when path is a file\n data_files: # Optional[str] path to source data files\n\n shards: # Optional[int] split dataset into N pieces (use with shards_idx)\n shards_idx: # Optional[int] = 0 the index of sharded dataset to use\n\n preprocess_shards: # Optional[int] process dataset in N sequential chunks for memory efficiency (exclusive with `shards`)\n\n name: # Optional[str] name of dataset configuration to load\n train_on_split: train # Optional[str] name of dataset split to load from\n revision: # Optional[str] The specific revision of the dataset to use when loading from the Hugging Face Hub. This can be a commit hash, tag, or branch name. If not specified, the latest version will be used. This parameter is ignored for local datasets.\n trust_remote_code: # Optional[bool] Trust remote code for untrusted source\n\n # Custom user instruction prompt\n - path: repo\n type:\n # The below are defaults. 
only set what's needed if you use a different column name.\n system_prompt: \"\"\n system_format: \"{system}\"\n field_system: system\n field_instruction: instruction\n field_input: input\n field_output: output\n\n # Customizable to be single line or multi-line\n # Use {instruction}/{input} as key to be replaced\n # 'format' can include {input}\n format: |-\n User: {instruction} {input}\n Assistant:\n # 'no_input_format' cannot include {input}\n no_input_format: \"{instruction} \"\n\n # For `completion` datsets only, uses the provided field instead of `text` column\n field:\n\n # Using chat template\n - path: ...\n # Set type to `chat_template` to use this strategy\n type: chat_template\n # Specify the name of the chat template to use\n # The name of the chat template to use for training, following values are supported:\n # - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default.\n # - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n # - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to if the tokenizer does not have a chat template else default to tokenizer. E.g. tokenizer_default_fallback_chatml.\n # - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n chat_template: tokenizer_default\n\n # Custom jinja chat template. Used only if `chat_template: jinja` or empty.\n chat_template_jinja:\n\n # Key containing the messages (default: \"messages\")\n field_messages: messages\n\n # Mapping of properties from the input dataset to the chat template.\n # (default: message_property_mappings={'role':'role', 'content':'content'})\n # If a property exists in the template but not in this mapping, the system will attempt\n # to load it directly from the message using the property name as the key.\n # Example: In the mapping below, 'from' is loaded from input dataset and used as 'role',\n # while 'value' is loaded and used as 'content' in the chat template.\n message_property_mappings:\n role: from\n content: value\n # ...\n\n # Optional[Dict[str, List]]. Roles mapping in the messages. The default is:\n roles:\n user: [\"human\", \"user\"]\n assistant: [\"gpt\", \"assistant\"]\n system: [\"system\"]\n tool: [\"tool\"]\n\n # Optional[bool]. Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If you wish to,\n # we recommend using a custom jinja template with the default system message removed or\n # adding a system turn with empty content.\n drop_system_message:\n\n # IMPORTANT: The following fields determine which parts of the conversation to train on.\n # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train\n # See examples at `docs/dataset-formats/conversation.qmd`\n # Note: If the below 4 fields are set to empty, defaults to training only on the last message.\n\n # Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: [\"assistant\"] # default\n # Optional[str]. Which EOS tokens to train on in the conversation. 
Possible values are:\n # - all: train on all EOS tokens\n # - turn (default): train on the EOS token at the end of each trainable turn\n # - last: train on the last EOS token in the conversation\n # TIP: Please make sure that your `tokenizer.eos_token` is same as EOS/EOT token in template. Otherwise, set `eos_token` under `special_tokens`.\n train_on_eos: last\n # The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`.\n message_field_training: training\n # The key in the message turn that contains the training details. Useful to selectively train on certain tokens in a turn.\n # The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train).\n message_field_training_detail: train_detail\n\n\n# If false, the datasets will not be shuffled and will keep their original order in `datasets`.\n# The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true.\nshuffle_merged_datasets: true\n\nDeduplicates datasets and test_datasets with identical entries.\ndataset_exact_deduplication: true\n\n# A list of one or more datasets to eval the model with.\n# You can use either test_datasets, or val_set_size, but not both.\ntest_datasets:\n - path: /workspace/data/eval.jsonl\n ds_type: json\n # You need to specify a split. For \"json\" datasets the default split is called \"train\".\n split: train\n type: completion\n data_files:\n - /workspace/data/eval.jsonl\n\n# use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo'\nrl:\nrl_beta: # Optional[float]. The beta parameter for the RL training.\n\n# dpo\ndpo_use_weighting: # Optional[bool]. Whether to perform weighting.\nrpo_alpha: # Optional[float]. Weighting of NLL term in loss from RPO paper.\n\n# orpo\norpo_alpha: 0.1 # Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to `beta` in `ORPOConfig` due to trl mapping.\n\n# kto\nkto_desirable_weight: # Optional[float]. Factor for desirable loss term in KTO loss.\nkto_undesirable_weight: # Optional[float]. Factor for undesirable loss term in KTO loss.\n\n# simpo\ncpo_alpha: 1.0 # Weight of the BC regularizer\nsimpo_gamma: 0.5 # Target reward margin for the SimPO loss\n\n# grpo\ntrl:\n use_vllm: # Optional[bool]. Whether to use VLLM for RL training.\n vllm_device: # Optional[str]. Device to use for VLLM.\n vllm_gpu_memory_utilization: # Optional[float]. GPU memory utilization for VLLM.\n vllm_max_model_len: # Optional[int]. Maximum length of the model for VLLM.\n vllm_dtype: # Optional[str]. Data type for VLLM.\n\n beta: # Optional[float]. Beta parameter for the RL training. Same as `rl_beta`. Use\n max_completion_length: # Optional[int]. Maximum length of the completion for RL training.\n\n reward_funcs: # Optional[list[str]]. List of reward functions to load. Paths must be importable from current dir.\n reward_weights: # Optional[list[float]]. List of reward weights for the reward functions.\n\n num_generations: # Optional[int]. Number of generations to sample.\n log_completions: # Optional[bool]. Whether to log completions.\n\n sync_ref_model: # Optional[bool]. Whether to sync the reference model.\n ref_model_mixup_alpha: # Optional[float]. Mixup alpha for the reference model.\n ref_model_sync_steps: # Optional[int]. 
Sync steps for the reference model.\n\n\n# reward modelling: `True` or `False`\nreward_model:\n\n# process reward modelling: `True` or `False`\nprocess_reward_model:\n\n# The name of the chat template to use for training, following values are supported:\n# - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default value.\n# - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n# - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not available in the tokenizer.\n# - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n# The selected chat template will be saved to the tokenizer_config.json for easier inferencing\n# Note: It is recommended to set train_on_inputs to true when using a chat template that is different from the model's default chat template.\nchat_template: tokenizer_default\n# custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null.\nchat_template_jinja: null\n# Changes the default system message. Currently only supports chatml.\ndefault_system_message: You are a helpful assistant. Please give a long and detailed answer.\n# Axolotl attempts to save the dataset as an arrow after packing the data together so\n# subsequent training attempts load faster, relative path\ndataset_prepared_path: data/last_run_prepared\n# Push prepared dataset to hub\npush_dataset_to_hub: # Optional[str] repo_org/repo_name\n# The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()`\n# if not set.\ndataset_processes: # defaults to os.cpu_count() if not set\n# Keep dataset in memory while preprocessing\n# Only needed if cached dataset is taking too much storage\ndataset_keep_in_memory:\n# push checkpoints to hub\nhub_model_id: # private repo path to push finetuned model\n# how to push checkpoints to hub\n# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy\nhub_strategy:\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets\n# Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: # boolean\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval.\nval_set_size: 0.04\n# Num shards for whole dataset\ndataset_shard_num:\n# Index of shard to use for whole dataset\ndataset_shard_idx:\n\n# The maximum length of an input to train with, this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: 2048\n# Pad inputs so each step uses constant sized buffers\n# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently\npad_to_sequence_len:\n# Use efficient multi-packing with block diagonal attention and per sequence position_ids. 
Recommend set to 'true'\nsample_packing:\n# Set to 'false' if getting errors during eval with sample_packing on.\neval_sample_packing:\n# You can set these packing optimizations AFTER starting a training at least once.\n# The trainer will provide recommended values for these values.\nsample_packing_eff_est:\ntotal_num_tokens:\n# Increasing the following values helps with packing, but usually only slightly (<%1.)\n# The number of samples packed at a time.\nsample_packing_group_size: 100000\n# The number of samples which can be packed into one sequence. Increase if using a large sequence_len with many short samples.\nsample_packing_bin_size: 200\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation:\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening:\n\n# Passed through to transformers when loading the model when launched without accelerate\n# Use `sequential` when training w/ model parallelism to limit memory\ndevice_map:\n# Defines the max memory usage per gpu on the system. Passed through to transformers when loading the model.\nmax_memory:\n\n# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model\nadapter: lora\n# If you already have a lora model trained that you want to load, put that here.\n# This means after training, if you want to test the model, you should set this to the value of `output_dir`.\n# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir:\n\n# LoRA hyperparameters\n# For more details about the following options, see:\n# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2\nlora_r: 8\nlora_alpha: 16\nlora_dropout: 0.05\nlora_target_modules:\n - q_proj\n - v_proj\n# - k_proj\n# - o_proj\n# - gate_proj\n# - down_proj\n# - up_proj\nlora_target_linear: # If true, will target all linear modules\npeft_layers_to_transform: # The layer indices to transform, otherwise, apply to all layers\n\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens.\n# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models.\n# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\n# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994\nlora_modules_to_save:\n# - embed_tokens\n# - lm_head\n\nlora_fan_in_fan_out: false\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for\n# speed and memory savings\n# See: https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html\nlora_mlp_kernel: true\nlora_qkv_kernel: true\nlora_o_kernel: true\n\n# LoRA+ hyperparameters\n# For more details about the following options, see:\n# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/train_builder.py`\nloraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.\nloraplus_lr_embedding: # loraplus learning rate for lora embedding layers. 
Default value is 1e-6.\n\npeft:\n # Configuration options for loftq initialization for LoRA\n # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization\n loftq_config:\n loftq_bits: # typically 4 bits\n\n# ReLoRA configuration\n# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed\nrelora_steps: # Number of steps per ReLoRA restart\nrelora_warmup_steps: # Number of per-restart warmup steps\nrelora_anneal_steps: # Number of anneal steps for each relora cycle\nrelora_prune_ratio: # threshold for optimizer magnitude when pruning\nrelora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings\n\n# wandb configuration if you're using it\n# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`.\nwandb_mode: # \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn off wandb\nwandb_project: # Your wandb project name\nwandb_entity: # A wandb Team name if using a Team\nwandb_watch:\nwandb_name: # Set the name of your wandb run\nwandb_run_id: # Set the ID of your wandb run\nwandb_log_model: # \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only at the end of training\n\n# mlflow configuration if you're using it\nmlflow_tracking_uri: # URI to mlflow\nmlflow_experiment_name: # Your experiment name\nmlflow_run_name: # Your run name\nhf_mlflow_log_artifacts: # set to true to copy each saved checkpoint on each save to mlflow artifact registry\n\n# Comet configuration if you're using it\n# Make sure your `COMET_API_KEY` environment variable is set (recommended) or you login to Comet with `comet login`.\n# Check out our documentation for more details https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start\nuse_comet: # Enable or disable Comet integration.\ncomet_api_key: # API key for Comet. Recommended to set via `comet login`.\ncomet_workspace: # Workspace name in Comet. Defaults to the user's default workspace.\ncomet_project_name: # Project name in Comet. Defaults to Uncategorized.\ncomet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key.\ncomet_mode: # Create a new experiment (\"create\") or log to an existing one (\"get\"). Default (\"get_or_create\") auto-selects based on configuration.\ncomet_online: # Set to True to log data to Comet server, or False for offline storage. Default is True.\ncomet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details.\n\n# Tensorboard\nuse_tensorboard: # Optional[bool]\n\n# Where to save the full-finetuned model to\noutput_dir: ./completed-model\n\n# Whether to use torch.compile and which backend to use\n# setting to `auto` will enable torch compile when torch>=2.5.1\ntorch_compile: # Optional[Union[Literal[\"auto\"], bool]]\ntorch_compile_backend: # Optional[str]\n\n# Training hyperparameters\n\n# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps.\ngradient_accumulation_steps: 1\n# The number of samples to include in each batch. 
This is the number of samples sent to each GPU.\n# Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: 2\neval_batch_size:\nnum_epochs: 4\nwarmup_steps: 100 # cannot use with warmup_ratio\nwarmup_ratio: 0.05 # cannot use with warmup_steps\nlearning_rate: 0.00003\nlr_quadratic_warmup:\nlogging_steps:\neval_steps: # Leave empty to eval at each epoch, integer for every N steps. float for fraction of total steps\nevals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps\neval_strategy: # Set to `\"no\"` to skip evaluation, `\"epoch\"` at end of each epoch, leave empty to infer from `eval_steps`.\nsave_strategy: # Set to `\"no\"` to skip checkpoint saves, `\"epoch\"` at end of each epoch, `\"best\"` when better result is achieved, leave empty to infer from `save_steps`.\nsave_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps\nsaves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsave_total_limit: # Checkpoints saved at a time\n# Maximum number of iterations to train for. It precedes num_epochs which means that\n# if both are set, num_epochs will not be guaranteed.\n# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps:\n\n# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time.\ninclude_tokens_per_second: # Optional[bool]\n\n# whether to find batch size that fits in memory. Passed to underlying transformers Trainer\nauto_find_batch_size: # Optional[bool]\n\neval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0\neval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128\ndo_causal_lm_eval: # Whether to run causal language model evaluation for metrics in `eval_causal_lm_metrics`.\neval_causal_lm_metrics: # HF evaluate metrics used during evaluation. 
Default is [\"sacrebleu\", \"comet\", \"ter\", \"chrf\", \"perplexity\"]\n\nprofiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir.\n # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information\n # snapshots can be visualized @ https://pytorch.org/memory_viz\n\nloss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training)\nloss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3)\n\n# Save model as safetensors (require safetensors package)\nsave_safetensors:\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: false\n# Group similarly sized data to minimize padding.\n# May be slower to start, as it must download and sort the entire dataset.\n# Note that training loss may have an oscillating pattern with this enabled.\ngroup_by_length: false\n\n# Whether to use gradient checkpointing https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: false\n# additional kwargs to pass to the trainer for gradient checkpointing\n# gradient_checkpointing_kwargs:\n# use_reentrant: true\n\n# Stop training after this many evaluation losses have increased in a row\n# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback\nearly_stopping_patience: 3\n\n# Specify a scheduler and kwargs to use with the optimizer\nlr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | empty for cosine\nlr_scheduler_kwargs:\ncosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr\ncosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)\n\n# For one_cycle optim\nlr_div_factor: # Learning rate div factor\n\n# Specify optimizer\n# Valid values are driven by the Transformers OptimizerNames class, see:\n# https://github.com/huggingface/transformers/blob/cbf924b76c03828101a34069a96d209314114fd5/src/transformers/training_args.py#L144-L189\n#\n# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of\n# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. 
When in doubt, it is recommended to start with the optimizer used\n# in the examples/ for your model and fine-tuning use case.\n#\n# Valid values for 'optimizer' include:\n# - adamw_torch\n# - adamw_torch_fused\n# - adamw_torch_xla\n# - adamw_torch_npu_fused\n# - adamw_apex_fused\n# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1)\n# - adafactor\n# - adamw_anyprecision\n# - adamw_torch_4bit\n# - ademamix\n# - sgd\n# - adagrad\n# - adamw_bnb_8bit\n# - adamw_8bit # alias for adamw_bnb_8bit\n# - ademamix_8bit\n# - lion_8bit\n# - lion_32bit\n# - paged_adamw_32bit\n# - paged_adamw_8bit\n# - paged_ademamix_32bit\n# - paged_ademamix_8bit\n# - paged_lion_32bit\n# - paged_lion_8bit\n# - rmsprop\n# - rmsprop_bnb\n# - rmsprop_bnb_8bit\n# - rmsprop_bnb_32bit\n# - galore_adamw\n# - galore_adamw_8bit\n# - galore_adafactor\n# - galore_adamw_layerwise\n# - galore_adamw_8bit_layerwise\n# - galore_adafactor_layerwise\n# - lomo\n# - adalomo\n# - grokadamw\n# - schedule_free_adamw\n# - schedule_free_sgd\n# - apollo_adamw\n# - apollo_adamw_layerwise\n#\n# Additional custom optimizers include:\n# - optimi_adamw\n# - ao_adamw_8bit\n# - ao_adamw_fp8\noptimizer:\n# Dictionary of arguments to pass to the optimizer\noptim_args:\n# For Galore Optimizers the following optim_args are available\n# rank: # type: int\n# update_proj_gap # type: int\n# scale # type: float\n# proj_type: # type: str, default = std\n\n# The target modules to optimize, i.e. the module names that you would like to train, right now this is used only for GaLore algorithm\noptim_target_modules:\n# - self_attn # for llama\n# - mlp\n\n# Specify weight decay\nweight_decay:\n# adamw hyperparams\nadam_beta1:\nadam_beta2:\nadam_epsilon:\n# Gradient clipping max norm\nmax_grad_norm:\n\n# Augmentation techniques\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings\n# currently only supported on Llama and Mistral\nneftune_noise_alpha:\n\n# Whether to bettertransformers\nflash_optimum:\n# Whether to use xformers attention patch https://github.com/facebookresearch/xformers:\nxformers_attention:\n# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:\nflash_attention:\nflash_attn_cross_entropy: # Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_rms_norm: # Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_fuse_qkv: # Whether to fuse QKV into a single operation\nflash_attn_fuse_mlp: # Whether to fuse part of the MLP into a single operation\n# Whether to use scaled-dot-product attention\n# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention:\n# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention:\n# Optional[bool]. Whether to use low_cpu_mem_usage\nlow_cpu_mem_usage:\n# Resume from a specific checkpoint dir\nresume_from_checkpoint:\n# If resume_from_checkpoint isn't set and you simply want it to start where it left off.\n# Be careful with this being turned on between different models.\nauto_resume_from_checkpoints: false\n\n## Multimodal section\n# int | tuple[int, int] | None . Size to resize images to, width x height.\n# Will read from model/processor config if not set.\nimage_size:\n# str. Algorithm to use for image resizing. \"bilinear\", \"bicubic\", \"lanczos\". 
Default is \"bilinear\".\nimage_resize_algorithm: 'bilinear'\n## End of multimodal section\n\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank:\n\n# Add or change special tokens.\n# If you add tokens here, you don't need to add them to the `tokens` list.\nspecial_tokens:\n # bos_token: \"<s>\"\n # eos_token: \"</s>\"\n # unk_token: \"<unk>\"\n # pad_token: \"[PAD]\"\n\n# Add extra tokens.\ntokens:\n\n# Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer.\n# Only works for tokens that are not part of the base vocab (aka are added_tokens).\n# Can be checked if they exist in tokenizer.json added_tokens.\nadded_tokens_overrides: # Dict[int, str]\n# 128041: \"<|im_start|>\"\n# 128042: \"<|im_end|>\"\n\n# FSDP\nfsdp:\nfsdp_config:\n\n# Deepspeed config path. e.g., deepspeed_configs/zero3.json\ndeepspeed:\n\n# Advanced DDP Arguments\nddp_timeout:\nddp_bucket_cap_mb:\nddp_broadcast_buffers:\n\n# Sequence parallelism\n# Set to a divisor of the number of GPUs available to split sequences into chunks of equal size.\n# Use in long context training to prevent OOM when sequences cannot fit into a single GPU's VRAM.\n# E.g., if 4 GPUs are available, set this value to 2 to split each sequence into two equal-sized\n# subsequences, or set to 4 to split into four equal-sized subsequences.\n# See https://axolotl-ai-cloud.github.io/axolotl/docs/sequence_parallelism.html for more details.\nsequence_parallel_degree:\n\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path:\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize\npretraining_dataset:\n\n# Debug mode\ndebug:\n\n# Seed\nseed:\n\n# Allow overwrite yml config using from cli\nstrict:", + "text": "# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files\n# This can also be a relative path to a model on disk\nbase_model: ./llama-7b-hf\n# You can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, etc)\nbase_model_ignore_patterns:\n# If the base_model repo on hf hub doesn't include configuration .json files,\n# You can set that here, or leave this empty to default to base_model\nbase_model_config: ./llama-7b-hf\n# You can specify to choose a specific model revision from huggingface hub\nrevision_of_model:\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one defined in the base model\ntokenizer_config:\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too\nmodel_type: AutoModelForCausalLM\n# Corresponding tokenizer for the model AutoTokenizer is a good choice\ntokenizer_type: AutoTokenizer\n# Trust remote code for untrusted source\ntrust_remote_code:\n# use_fast option for tokenizer loading from_pretrained, default to True\ntokenizer_use_fast:\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy:\n# Resize the model embeddings when new tokens are added to multiples of 32\n# This is reported to improve training speed on some models\nresize_token_embeddings_to_32x:\n# Optional[bool] Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.\nshrink_embeddings:\n# Whether to load the model with randomly initialized weights. 
Useful for\n# pre-training a model from scratch or debugging purposes.\nrandom_init_weights:\n\n# (Internal use only)\n# Used to identify which the model is based on\nis_falcon_derived_model:\nis_llama_derived_model:\nis_qwen_derived_model:\n# Please note that if you set this to true, `padding_side` will be set to \"left\" by default\nis_mistral_derived_model:\n\n# optional overrides to the base model configuration\noverrides_of_model_config:\n # RoPE Scaling https://github.com/huggingface/transformers/pull/24653\n rope_scaling:\n type: # linear | dynamic\n factor: # float\n\n# optional overrides the base model loading from_pretrained\noverrides_of_model_kwargs:\n # use_cache: False\n\n# optional overrides to the bnb 4bit quantization configuration\n# https://huggingface.co/docs/transformers/main/main_classes/quantization#transformers.BitsAndBytesConfig\nbnb_config_kwargs:\n # These are default values\n llm_int8_has_fp16_weight: false\n bnb_4bit_quant_type: nf4\n bnb_4bit_use_double_quant: true\n\n\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: true\n\n# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer\nload_in_8bit: true\n# Use bitsandbytes 4 bit\nload_in_4bit:\n\n# Use CUDA bf16\nbf16: true # bool or 'full' for `bf16_full_eval`. require >=ampere\n# Use CUDA fp16\nfp16: true\n# Use CUDA tf32\ntf32: true # require >=ampere\n\n# No AMP (automatic mixed precision)\nbfloat16: true # require >=ampere\nfloat16: true\n\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in gigabytes); default: unset\ngpu_memory_limit: 20GiB\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge\nlora_on_cpu: true\n\n# List[str]. Add plugins to extend the pipeline.\n# See `src/axolotl/integrations` for the available plugins or doc below for more details.\n# https://axolotl-ai-cloud.github.io/axolotl/docs/custom_integrations.html\nplugins:\n # - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin\n\n# A list of one or more datasets to finetune the model with\ndatasets:\n # HuggingFace dataset repo | s3://,gs:// path | \"json\" for local dataset, make sure to fill data_files\n - path: vicgalle/alpaca-gpt4\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn>\n ds_type: # Optional[str] (json|arrow|parquet|text|csv) defines the datatype when path is a file\n data_files: # Optional[str] path to source data files\n\n shards: # Optional[int] split dataset into N pieces (use with shards_idx)\n shards_idx: # Optional[int] = 0 the index of sharded dataset to use\n\n preprocess_shards: # Optional[int] process dataset in N sequential chunks for memory efficiency (exclusive with `shards`)\n\n name: # Optional[str] name of dataset configuration to load\n train_on_split: train # Optional[str] name of dataset split to load from\n revision: # Optional[str] The specific revision of the dataset to use when loading from the Hugging Face Hub. This can be a commit hash, tag, or branch name. If not specified, the latest version will be used. This parameter is ignored for local datasets.\n trust_remote_code: # Optional[bool] Trust remote code for untrusted source\n\n # Custom user instruction prompt\n - path: repo\n type:\n # The below are defaults. 
only set what's needed if you use a different column name.\n system_prompt: \"\"\n system_format: \"{system}\"\n field_system: system\n field_instruction: instruction\n field_input: input\n field_output: output\n\n # Customizable to be single line or multi-line\n # Use {instruction}/{input} as key to be replaced\n # 'format' can include {input}\n format: |-\n User: {instruction} {input}\n Assistant:\n # 'no_input_format' cannot include {input}\n no_input_format: \"{instruction} \"\n\n # For `completion` datsets only, uses the provided field instead of `text` column\n field:\n\n # Using chat template\n - path: ...\n # Set type to `chat_template` to use this strategy\n type: chat_template\n # Specify the name of the chat template to use\n # The name of the chat template to use for training, following values are supported:\n # - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default.\n # - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n # - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to if the tokenizer does not have a chat template else default to tokenizer. E.g. tokenizer_default_fallback_chatml.\n # - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n chat_template: tokenizer_default\n\n # Custom jinja chat template. Used only if `chat_template: jinja` or empty.\n chat_template_jinja:\n\n # Key containing the messages (default: \"messages\")\n field_messages: messages\n\n # Mapping of properties from the input dataset to the chat template.\n # (default: message_property_mappings={'role':'role', 'content':'content'})\n # If a property exists in the template but not in this mapping, the system will attempt\n # to load it directly from the message using the property name as the key.\n # Example: In the mapping below, 'from' is loaded from input dataset and used as 'role',\n # while 'value' is loaded and used as 'content' in the chat template.\n message_property_mappings:\n role: from\n content: value\n # ...\n\n # Optional[Dict[str, List]]. Roles mapping in the messages. The default is:\n roles:\n user: [\"human\", \"user\"]\n assistant: [\"gpt\", \"assistant\"]\n system: [\"system\"]\n tool: [\"tool\"]\n\n # Optional[bool]. Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If you wish to,\n # we recommend using a custom jinja template with the default system message removed or\n # adding a system turn with empty content.\n drop_system_message:\n\n # IMPORTANT: The following fields determine which parts of the conversation to train on.\n # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train\n # See examples at `docs/dataset-formats/conversation.qmd`\n # Note: If the below 4 fields are set to empty, defaults to training only on the last message.\n\n # Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: [\"assistant\"] # default\n # Optional[str]. Which EOS tokens to train on in the conversation. 
Possible values are:\n # - all: train on all EOS tokens\n # - turn (default): train on the EOS token at the end of each trainable turn\n # - last: train on the last EOS token in the conversation\n # TIP: Please make sure that your `tokenizer.eos_token` is same as EOS/EOT token in template. Otherwise, set `eos_token` under `special_tokens`.\n train_on_eos: last\n # The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`.\n message_field_training: training\n # The key in the message turn that contains the training details. Useful to selectively train on certain tokens in a turn.\n # The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train).\n message_field_training_detail: train_detail\n\n\n# If false, the datasets will not be shuffled and will keep their original order in `datasets`.\n# The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true.\nshuffle_merged_datasets: true\n\nDeduplicates datasets and test_datasets with identical entries.\ndataset_exact_deduplication: true\n\n# A list of one or more datasets to eval the model with.\n# You can use either test_datasets, or val_set_size, but not both.\ntest_datasets:\n - path: /workspace/data/eval.jsonl\n ds_type: json\n # You need to specify a split. For \"json\" datasets the default split is called \"train\".\n split: train\n type: completion\n data_files:\n - /workspace/data/eval.jsonl\n\n# use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo'\nrl:\nrl_beta: # Optional[float]. The beta parameter for the RL training.\n\n# dpo\ndpo_use_weighting: # Optional[bool]. Whether to perform weighting.\nrpo_alpha: # Optional[float]. Weighting of NLL term in loss from RPO paper.\n\n# orpo\norpo_alpha: 0.1 # Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to `beta` in `ORPOConfig` due to trl mapping.\n\n# kto\nkto_desirable_weight: # Optional[float]. Factor for desirable loss term in KTO loss.\nkto_undesirable_weight: # Optional[float]. Factor for undesirable loss term in KTO loss.\n\n# simpo\ncpo_alpha: 1.0 # Weight of the BC regularizer\nsimpo_gamma: 0.5 # Target reward margin for the SimPO loss\n\n# grpo\ntrl:\n use_vllm: # Optional[bool]. Whether to use VLLM for RL training.\n vllm_device: # Optional[str]. Device to use for VLLM.\n vllm_gpu_memory_utilization: # Optional[float]. GPU memory utilization for VLLM.\n vllm_max_model_len: # Optional[int]. Maximum length of the model for VLLM.\n vllm_dtype: # Optional[str]. Data type for VLLM.\n\n beta: # Optional[float]. Beta parameter for the RL training. Same as `rl_beta`. Use\n max_completion_length: # Optional[int]. Maximum length of the completion for RL training.\n\n reward_funcs: # Optional[list[str]]. List of reward functions to load. Paths must be importable from current dir.\n reward_weights: # Optional[list[float]]. List of reward weights for the reward functions.\n\n num_generations: # Optional[int]. Number of generations to sample.\n log_completions: # Optional[bool]. Whether to log completions.\n\n sync_ref_model: # Optional[bool]. Whether to sync the reference model.\n ref_model_mixup_alpha: # Optional[float]. Mixup alpha for the reference model.\n ref_model_sync_steps: # Optional[int]. 
Sync steps for the reference model.\n\n\n# reward modelling: `True` or `False`\nreward_model:\n\n# process reward modelling: `True` or `False`\nprocess_reward_model:\n\n# The name of the chat template to use for training, following values are supported:\n# - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default value.\n# - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n# - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not available in the tokenizer.\n# - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n# The selected chat template will be saved to the tokenizer_config.json for easier inferencing\n# Note: It is recommended to set train_on_inputs to true when using a chat template that is different from the model's default chat template.\nchat_template: tokenizer_default\n# custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null.\nchat_template_jinja: null\n# Changes the default system message. Currently only supports chatml.\ndefault_system_message: You are a helpful assistant. Please give a long and detailed answer.\n# Axolotl attempts to save the dataset as an arrow after packing the data together so\n# subsequent training attempts load faster, relative path\ndataset_prepared_path: data/last_run_prepared\n# Push prepared dataset to hub\npush_dataset_to_hub: # Optional[str] repo_org/repo_name\n# The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()`\n# if not set.\ndataset_processes: # defaults to os.cpu_count() if not set\n# Keep dataset in memory while preprocessing\n# Only needed if cached dataset is taking too much storage\ndataset_keep_in_memory:\n# push checkpoints to hub\nhub_model_id: # private repo path to push finetuned model\n# how to push checkpoints to hub\n# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy\nhub_strategy:\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets\n# Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: # boolean\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval.\nval_set_size: 0.04\n# Num shards for whole dataset\ndataset_shard_num:\n# Index of shard to use for whole dataset\ndataset_shard_idx:\n\n# The maximum length of an input to train with, this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: 2048\n# Pad inputs so each step uses constant sized buffers\n# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently\npad_to_sequence_len:\n# Use efficient multi-packing with block diagonal attention and per sequence position_ids. 
Recommend set to 'true'\nsample_packing:\n# Set to 'false' if getting errors during eval with sample_packing on.\neval_sample_packing:\n# You can set these packing optimizations AFTER starting a training at least once.\n# The trainer will provide recommended values for these values.\nsample_packing_eff_est:\ntotal_num_tokens:\n# Increasing the following values helps with packing, but usually only slightly (<%1.)\n# The number of samples packed at a time.\nsample_packing_group_size: 100000\n# The number of samples which can be packed into one sequence. Increase if using a large sequence_len with many short samples.\nsample_packing_bin_size: 200\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation:\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening:\n\n# Passed through to transformers when loading the model when launched without accelerate\n# Use `sequential` when training w/ model parallelism to limit memory\ndevice_map:\n# Defines the max memory usage per gpu on the system. Passed through to transformers when loading the model.\nmax_memory:\n\n# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model\nadapter: lora\n# If you already have a lora model trained that you want to load, put that here.\n# This means after training, if you want to test the model, you should set this to the value of `output_dir`.\n# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir:\n\n# LoRA hyperparameters\n# For more details about the following options, see:\n# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2\nlora_r: 8\nlora_alpha: 16\nlora_dropout: 0.05\nlora_target_modules:\n - q_proj\n - v_proj\n# - k_proj\n# - o_proj\n# - gate_proj\n# - down_proj\n# - up_proj\nlora_target_linear: # If true, will target all linear modules\npeft_layers_to_transform: # The layer indices to transform, otherwise, apply to all layers\n\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens.\n# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models.\n# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\n# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994\nlora_modules_to_save:\n# - embed_tokens\n# - lm_head\n\nlora_fan_in_fan_out: false\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for\n# speed and memory savings\n# See: https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html\nlora_mlp_kernel: true\nlora_qkv_kernel: true\nlora_o_kernel: true\n\n# LoRA+ hyperparameters\n# For more details about the following options, see:\n# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/train_builder.py`\nloraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.\nloraplus_lr_embedding: # loraplus learning rate for lora embedding layers. 
Default value is 1e-6.\n\npeft:\n # Configuration options for loftq initialization for LoRA\n # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization\n loftq_config:\n loftq_bits: # typically 4 bits\n\n# ReLoRA configuration\n# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed\nrelora_steps: # Number of steps per ReLoRA restart\nrelora_warmup_steps: # Number of per-restart warmup steps\nrelora_anneal_steps: # Number of anneal steps for each relora cycle\nrelora_prune_ratio: # threshold for optimizer magnitude when pruning\nrelora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings\n\n# wandb configuration if you're using it\n# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or log in to wandb with `wandb login`.\nwandb_mode: # \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn off wandb\nwandb_project: # Your wandb project name\nwandb_entity: # A wandb Team name if using a Team\nwandb_watch:\nwandb_name: # Set the name of your wandb run\nwandb_run_id: # Set the ID of your wandb run\nwandb_log_model: # \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only at the end of training\n\n# mlflow configuration if you're using it\nmlflow_tracking_uri: # URI to mlflow\nmlflow_experiment_name: # Your experiment name\nmlflow_run_name: # Your run name\nhf_mlflow_log_artifacts: # set to true to copy each saved checkpoint on each save to mlflow artifact registry\n\n# Comet configuration if you're using it\n# Make sure your `COMET_API_KEY` environment variable is set (recommended) or log in to Comet with `comet login`.\n# Check out our documentation for more details https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start\nuse_comet: # Enable or disable Comet integration.\ncomet_api_key: # API key for Comet. Recommended to set via `comet login`.\ncomet_workspace: # Workspace name in Comet. Defaults to the user's default workspace.\ncomet_project_name: # Project name in Comet. Defaults to Uncategorized.\ncomet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Defaults to a random key.\ncomet_mode: # Create a new experiment (\"create\") or log to an existing one (\"get\"). Default (\"get_or_create\") auto-selects based on configuration.\ncomet_online: # Set to True to log data to Comet server, or False for offline storage. Default is True.\ncomet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details.\n\n# Tensorboard\nuse_tensorboard: # Optional[bool]\n\n# Where to save the full-finetuned model to\noutput_dir: ./completed-model\n\n# Whether to use torch.compile and which backend to use\n# Setting to `auto` will enable torch compile when torch>=2.5.1\ntorch_compile: # Optional[Union[Literal[\"auto\"], bool]]\ntorch_compile_backend: # Optional[str]\n\n# Training hyperparameters\n\n# If greater than 1, the optimizer step will be skipped and gradients will be accumulated over the given number of steps.\ngradient_accumulation_steps: 1\n# The number of samples to include in each batch.
This is the number of samples sent to each GPU.\n# Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: 2\neval_batch_size:\nnum_epochs: 4\nwarmup_steps: 100 # cannot use with warmup_ratio\nwarmup_ratio: 0.05 # cannot use with warmup_steps\nlearning_rate: 0.00003\nlr_quadratic_warmup:\nlogging_steps:\neval_steps: # Leave empty to eval at each epoch, integer for every N steps, float for fraction of total steps\nevals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps\neval_strategy: # Set to `\"no\"` to skip evaluation, `\"epoch\"` at end of each epoch, leave empty to infer from `eval_steps`.\nsave_strategy: # Set to `\"no\"` to skip checkpoint saves, `\"epoch\"` at end of each epoch, `\"best\"` when better result is achieved, leave empty to infer from `save_steps`.\nsave_steps: # Leave empty to save at each epoch, integer for every N steps, float for fraction of total steps\nsaves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsave_total_limit: # Maximum number of checkpoints kept at a time\n# Maximum number of iterations to train for. It takes precedence over num_epochs, which means that\n# if both are set, num_epochs is not guaranteed.\n# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps:\n\n# bool of whether to include tokens per second in the training metrics. This iterates over the entire dataset once, so it takes some time.\ninclude_tokens_per_second: # Optional[bool]\n\n# whether to find batch size that fits in memory. Passed to underlying transformers Trainer\nauto_find_batch_size: # Optional[bool]\n\neval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0\neval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128\ndo_causal_lm_eval: # Whether to run causal language model evaluation for metrics in `eval_causal_lm_metrics`.\neval_causal_lm_metrics: # HF evaluate metrics used during evaluation.
Default is [\"sacrebleu\", \"comet\", \"ter\", \"chrf\", \"perplexity\"]\n\nprofiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir.\n # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information\n # snapshots can be visualized @ https://pytorch.org/memory_viz\n\nloss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training)\nloss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3)\n\n# Save model as safetensors (require safetensors package)\nsave_safetensors:\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: false\n# Group similarly sized data to minimize padding.\n# May be slower to start, as it must download and sort the entire dataset.\n# Note that training loss may have an oscillating pattern with this enabled.\ngroup_by_length: false\n\n# Whether to use gradient checkpointing https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: false\n# additional kwargs to pass to the trainer for gradient checkpointing\n# gradient_checkpointing_kwargs:\n# use_reentrant: true\n\n# Stop training after this many evaluation losses have increased in a row\n# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback\nearly_stopping_patience: 3\n\n# Specify a scheduler and kwargs to use with the optimizer\nlr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | empty for cosine\nlr_scheduler_kwargs:\ncosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr\ncosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)\n\n# For one_cycle optim\nlr_div_factor: # Learning rate div factor\n\n# Specify optimizer\n# Valid values are driven by the Transformers OptimizerNames class, see:\n# https://github.com/huggingface/transformers/blob/cbf924b76c03828101a34069a96d209314114fd5/src/transformers/training_args.py#L144-L189\n#\n# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of\n# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. 
When in doubt, it is recommended to start with the optimizer used\n# in the examples/ for your model and fine-tuning use case.\n#\n# Valid values for 'optimizer' include:\n# - adamw_torch\n# - adamw_torch_fused\n# - adamw_torch_xla\n# - adamw_torch_npu_fused\n# - adamw_apex_fused\n# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1)\n# - adafactor\n# - adamw_anyprecision\n# - adamw_torch_4bit\n# - ademamix\n# - sgd\n# - adagrad\n# - adamw_bnb_8bit\n# - adamw_8bit # alias for adamw_bnb_8bit\n# - ademamix_8bit\n# - lion_8bit\n# - lion_32bit\n# - paged_adamw_32bit\n# - paged_adamw_8bit\n# - paged_ademamix_32bit\n# - paged_ademamix_8bit\n# - paged_lion_32bit\n# - paged_lion_8bit\n# - rmsprop\n# - rmsprop_bnb\n# - rmsprop_bnb_8bit\n# - rmsprop_bnb_32bit\n# - galore_adamw\n# - galore_adamw_8bit\n# - galore_adafactor\n# - galore_adamw_layerwise\n# - galore_adamw_8bit_layerwise\n# - galore_adafactor_layerwise\n# - lomo\n# - adalomo\n# - grokadamw\n# - schedule_free_adamw\n# - schedule_free_sgd\n# - apollo_adamw\n# - apollo_adamw_layerwise\n#\n# Additional custom optimizers include:\n# - optimi_adamw\n# - ao_adamw_8bit\n# - ao_adamw_fp8\noptimizer:\n# Dictionary of arguments to pass to the optimizer\noptim_args:\n# For GaLore optimizers the following optim_args are available\n# rank: # type: int\n# update_proj_gap: # type: int\n# scale: # type: float\n# proj_type: # type: str, default = std\n\n# The target modules to optimize, i.e. the module names that you would like to train. Right now this is used only for the GaLore algorithm.\noptim_target_modules:\n# - self_attn # for llama\n# - mlp\n\n# Specify weight decay\nweight_decay:\n# adamw hyperparams\nadam_beta1:\nadam_beta2:\nadam_epsilon:\n# Gradient clipping max norm\nmax_grad_norm:\n\n# Augmentation techniques\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings\n# currently only supported on Llama and Mistral\nneftune_noise_alpha:\n\n# Whether to use BetterTransformer\nflash_optimum:\n# Whether to use xformers attention patch https://github.com/facebookresearch/xformers:\nxformers_attention:\n# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:\nflash_attention:\nflash_attn_cross_entropy: # Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_rms_norm: # Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_fuse_qkv: # Whether to fuse QKV into a single operation\nflash_attn_fuse_mlp: # Whether to fuse part of the MLP into a single operation\n# Whether to use scaled-dot-product attention\n# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention:\n# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention:\n# Optional[bool]. Whether to use low_cpu_mem_usage\nlow_cpu_mem_usage:\n# Resume from a specific checkpoint dir\nresume_from_checkpoint:\n# Set to true if resume_from_checkpoint isn't set and you simply want training to resume where it left off.\n# Be careful with this being turned on between different models.\nauto_resume_from_checkpoints: false\n\n## Multimodal section\n# int | tuple[int, int] | None. Size to resize images to, width x height.\n# Will read from model/processor config if not set.\nimage_size:\n# str. Algorithm to use for image resizing. \"bilinear\", \"bicubic\", \"lanczos\".
Default is \"bilinear\".\nimage_resize_algorithm: 'bilinear'\n## End of multimodal section\n\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank:\n\n# Add or change special tokens.\n# If you add tokens here, you don't need to add them to the `tokens` list.\nspecial_tokens:\n # bos_token: \"<s>\"\n # eos_token: \"</s>\"\n # unk_token: \"<unk>\"\n # pad_token: \"[PAD]\"\n\n# Add extra tokens.\ntokens:\n\n# Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer.\n# Only works for tokens that are not part of the base vocab (aka are added_tokens).\n# Can be checked if they exist in tokenizer.json added_tokens.\nadded_tokens_overrides: # Dict[int, str]\n# 128041: \"<|im_start|>\"\n# 128042: \"<|im_end|>\"\n\n# FSDP\nfsdp:\nfsdp_config:\n\n# Deepspeed config path. e.g., deepspeed_configs/zero3.json\ndeepspeed:\n\n# Advanced DDP Arguments\nddp_timeout:\nddp_bucket_cap_mb:\nddp_broadcast_buffers:\n\n# Sequence parallelism\n# Set to a divisor of the number of GPUs available to split sequences into chunks of equal size.\n# Use in long context training to prevent OOM when sequences cannot fit into a single GPU's VRAM.\n# E.g., if 4 GPUs are available, set this value to 2 to split each sequence into two equal-sized\n# subsequences, or set to 4 to split into four equal-sized subsequences.\n# See https://axolotl-ai-cloud.github.io/axolotl/docs/sequence_parallelism.html for more details.\nsequence_parallel_degree:\n# Optional; strides across the key dimension. Larger values use more memory but should make training faster.\n# Must evenly divide the number of KV heads in your model.\nheads_k_stride: 1\n\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path:\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize\npretraining_dataset:\n\n# Debug mode\ndebug:\n\n# Seed\nseed:\n\n# Allow overwrite yml config using from cli\nstrict:", "crumbs": [ "Getting Started", "Config Reference" @@ -174,7 +174,7 @@ "href": "docs/multi-gpu.html#sec-overview", "title": "Multi-GPU", "section": "1 Overview", - "text": "1 Overview\nAxolotl supports several methods for multi-GPU training:\n\nDeepSpeed (recommended)\nFSDP (Fully Sharded Data Parallel)\nFSDP + QLoRA", + "text": "1 Overview\nAxolotl supports several methods for multi-GPU training:\n\nDeepSpeed (recommended)\nFSDP (Fully Sharded Data Parallel)\nSequence parallelism\nFSDP + QLoRA", "crumbs": [ "Deployments", "Multi-GPU" @@ -196,7 +196,18 @@ "href": "docs/multi-gpu.html#sec-fsdp", "title": "Multi-GPU", "section": "3 FSDP", - "text": "3 FSDP\n\n3.1 Basic FSDP Configuration\nfsdp:\n - full_shard\n - auto_wrap\nfsdp_config:\n fsdp_offload_params: true\n fsdp_state_dict_type: FULL_STATE_DICT\n fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer\n\n\n3.2 FSDP + QLoRA\nFor combining FSDP with QLoRA, see our dedicated guide.", + "text": "3 FSDP\n\n3.1 Basic FSDP Configuration\nfsdp:\n - full_shard\n - auto_wrap\nfsdp_config:\n fsdp_offload_params: true\n fsdp_state_dict_type: FULL_STATE_DICT\n fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer", + "crumbs": [ + "Deployments", + "Multi-GPU" + ] + }, + { + "objectID": "docs/multi-gpu.html#sec-sequence-parallelism", + "href": "docs/multi-gpu.html#sec-sequence-parallelism", + "title": "Multi-GPU", + "section": "4 Sequence parallelism", + "text": "4 Sequence parallelism\nWe support sequence parallelism (SP) via the\nring-flash-attention project. 
This\nallows one to split up sequences across GPUs, which is useful in the event that a\nsingle sequence causes OOM errors during model training.\nFirst, install ring-flash-attn, recommended via pip install axolotl[ring-flash-attn],\nor from source with pip install .[ring-flash-attn].\nYour Axolotl YAML config should contain the following lines:\nsequence_parallel_degree: 4 # Split each sequence into 4 parts, one per GPU\nflash_attention: true # Required with sequence parallelism\n\n# Optional; strides across the key dimension. Larger values use more memory but will make training faster.\nheads_k_stride: 1\nSee our dedicated guide for more details.\n\n4.1 FSDP + QLoRA\nFor combining FSDP with QLoRA, see our dedicated guide.", "crumbs": [ "Deployments", "Multi-GPU" @@ -206,8 +217,8 @@ "objectID": "docs/multi-gpu.html#sec-performance", "href": "docs/multi-gpu.html#sec-performance", "title": "Multi-GPU", - "section": "4 Performance Optimization", - "text": "4 Performance Optimization\n\n4.1 Liger Kernel Integration\nPlease see docs for more info.", + "section": "5 Performance Optimization", + "text": "5 Performance Optimization\n\n5.1 Liger Kernel Integration\nPlease see docs for more info.", "crumbs": [ "Deployments", "Multi-GPU" @@ -217,8 +228,8 @@ "objectID": "docs/multi-gpu.html#sec-troubleshooting", "href": "docs/multi-gpu.html#sec-troubleshooting", "title": "Multi-GPU", - "section": "5 Troubleshooting", - "text": "5 Troubleshooting\n\n5.1 NCCL Issues\nFor NCCL-related problems, see our NCCL troubleshooting guide.\n\n\n5.2 Common Problems\n\nMemory IssuesTraining Instability\n\n\n\nReduce micro_batch_size\nReduce eval_batch_size\nAdjust gradient_accumulation_steps\nConsider using a higher ZeRO stage\n\n\n\n\nStart with DeepSpeed ZeRO-2\nMonitor loss values\nCheck learning rates\n\n\n\n\nFor more detailed troubleshooting, see our debugging guide.", + "section": "6 Troubleshooting", + "text": "6 Troubleshooting\n\n6.1 NCCL Issues\nFor NCCL-related problems, see our NCCL troubleshooting guide.\n\n\n6.2 Common Problems\n\nMemory IssuesTraining Instability\n\n\n\nReduce micro_batch_size\nReduce eval_batch_size\nAdjust gradient_accumulation_steps\nConsider using a higher ZeRO stage\n\n\n\n\nStart with DeepSpeed ZeRO-2\nMonitor loss values\nCheck learning rates\n\n\n\n\nFor more detailed troubleshooting, see our debugging guide.", "crumbs": [ "Deployments", "Multi-GPU" @@ -1571,63 +1582,99 @@ "href": "docs/sequence_parallelism.html", "title": "Sequence Parallelism", "section": "", - "text": "Sequence parallelism is a technique that splits sequences across multiple GPUs,\nallowing you to train with very long sequences that wouldn’t fit on a single GPU. Each\nGPU processes a different portion of the sequence, and the results are aggregated\nthrough a ring communication pattern.\n\n\nUse sequence parallelism when:\n\nYou need to train with sequence lengths that don’t fit into a single GPU’s memory\nYou have multiple GPUs available\nYou’re experiencing OOM (Out Of Memory) errors with long sequences\n\n\n\n\nTo enable sequence parallelism, add the following to your configuration file:\n# Set to a divisor (> 1) of the number of GPUs available\nsequence_parallel_degree: 4 # Split sequences across 4 GPUs\nThe sequence_parallel_degree should be a divisor of the total number of GPUs. 
For example:\n\nWith 8 GPUs, valid values would be 2, 4, or 8\nWith 4 GPUs, valid values would be 2 or 4\n\n\n\n\nWhen sequence parallelism is enabled:\n\nEach sequence is divided into equal chunks across the GPUs in a sequence parallel group\nThe data collator handles the chunking of input_ids, attention_mask, labels, and position_ids\nPosition IDs are adjusted to maintain proper relative positions, especially for packed sequences\nThe trainer uses special ring communication patterns for attention operations\n\n\n\n\nTo use sequence parallelism, you need:\n\nMultiple GPUs (at least 2)\nThe ring-flash-attn package. Install with:\n\npip install axolotl[ring-flash-attn] (preferred)\npip install ring-flash-attn>=0.1.4\n\n\n\n\n\n\nFlash attention must be enabled for this to work (flash_attention: true in config YAML)\nMay have a small performance overhead due to communication between GPUs\n\n\n\n\n# Example config with sequence parallelism\nbase_model: meta-llama/Llama-3-8B-Instruct\nsequence_len: 8192\nsequence_parallel_degree: 2 # Split each sequence into 4 parts\nflash_attention: true # Required with sequence parallelism\n...\nThis will train the Llama 3 8B model with 8K context length, with each sequence split\ninto 2 subsequences of length 4096 across 2 GPUs.\n\n\n\nSequence parallelism is compatible with Axolotl’s sample packing functionality. When using both features together:\n\nSamples are first packed together\nThe packed sequences are then divided across GPUs in the sequence parallel group\nPosition IDs are automatically adjusted to maintain proper relative positions\n\n\n\n\nWhen using sequence parallelism, your effective global batch size is divided by the sequence_parallel_degree. This happens because:\n\nEach group of sequence_parallel_degree GPUs works on the same batch (just different parts of each sequence)\nThe number of batches processed per step decreases\n\nFor example:\n- With 8 GPUs and no sequence parallelism: 8 different batches processed per step\n- With 8 GPUs and sequence_parallel_degree=4: Only 2 different batches processed per step (each split across 4 GPUs)\n- If your per-GPU micro_batch_size is 2, the global batch size decreases from 16 to 4" + "text": "Sequence parallelism is a technique that splits sequences across multiple GPUs,\nallowing you to train with very long sequences that wouldn’t fit on a single GPU. Each\nGPU processes a different portion of the sequence, and the results are aggregated\nthrough a ring communication pattern.\n\n\nUse sequence parallelism when:\n\nYou need to train with sequence lengths that don’t fit into a single GPU’s memory\nYou have multiple GPUs available\nYou’re experiencing OOM (Out Of Memory) errors with long sequences\n\n\n\n\nTo enable sequence parallelism, add the following to your configuration file:\n# Set to a divisor (> 1) of the number of GPUs available\nsequence_parallel_degree: 4 # Split sequences across 4 GPUs\n# Optional; strides across the key dimension. Larger values use more memory but should make training faster.\nheads_k_stride: 1\nThe sequence_parallel_degree should be a divisor of the total number of GPUs. 
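A minimal sketch of one valid choice, assuming a single 8-GPU node (illustrative values, not Axolotl defaults):\nsequence_parallel_degree: 4 # 8 GPUs / degree 4 = 2 sequence-parallel groups of 4 GPUs each\nflash_attention: true # required for sequence parallelism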
For example:\n\nWith 8 GPUs, valid values would be 2, 4, or 8\nWith 4 GPUs, valid values would be 2 or 4\n\n\n\n\nWhen sequence parallelism is enabled:\n\nEach sequence is divided into equal chunks across the GPUs in a sequence parallel group\nThe data collator handles the chunking of input_ids, attention_mask, labels, and position_ids\nPosition IDs are adjusted to maintain proper relative positions, especially for packed sequences\nThe trainer uses special ring communication patterns for attention operations\n\n\n\n\nTo use sequence parallelism, you need:\n\nMultiple GPUs (at least 2)\nThe ring-flash-attn package. Install with:\n\npip install axolotl[ring-flash-attn] (preferred)\npip install ring-flash-attn>=0.1.4\n\n\n\n\n\n\nFlash attention must be enabled for this to work (flash_attention: true in config YAML)\nMay have a small performance overhead due to communication between GPUs\n\n\n\n\nbase_model: meta-llama/Llama-3-8B-Instruct\nsequence_len: 8192\n\n...\n\nsequence_parallel_degree: 4 # Split each sequence into 4 parts, one per GPU\nflash_attention: true # Required with sequence parallelism\n# Optional; strides across the key dimension. Larger values use more memory but should make training faster.\nheads_k_stride: 1\n\n...\nThis will train the Llama 3 8B model with 8K context length, with each sequence split\ninto 4 subsequences of length 2048 across 4 GPUs.\n\n\n\nSequence parallelism is compatible with Axolotl’s sample packing functionality. When using both features together:\n\nSamples are first packed together\nThe packed sequences are then divided across GPUs in the sequence parallel group\nPosition IDs are automatically adjusted to maintain proper relative positions\n\n\n\n\nWhen using sequence parallelism, your effective global batch size is divided by the sequence_parallel_degree.
This happens because:\n\nEach group of sequence_parallel_degree GPUs works on the same batch (just different parts of each sequence)\nThe number of batches processed per step decreases\n\nFor example:\n- With 8 GPUs and no sequence parallelism: 8 different batches processed per step\n- With 8 GPUs and sequence_parallel_degree=4: Only 2 different batches processed per step (each split across 4 GPUs)\n- If your per-GPU micro_batch_size is 2, the global batch size decreases from 16 to 4", + "crumbs": [ + "Advanced Features", + "Sequence Parallelism" + ] }, { "objectID": "docs/sequence_parallelism.html#when-to-use-sequence-parallelism", "href": "docs/sequence_parallelism.html#when-to-use-sequence-parallelism", "title": "Sequence Parallelism", "section": "", - "text": "Use sequence parallelism when:\n\nYou need to train with sequence lengths that don’t fit into a single GPU’s memory\nYou have multiple GPUs available\nYou’re experiencing OOM (Out Of Memory) errors with long sequences" + "text": "Use sequence parallelism when:\n\nYou need to train with sequence lengths that don’t fit into a single GPU’s memory\nYou have multiple GPUs available\nYou’re experiencing OOM (Out Of Memory) errors with long sequences", + "crumbs": [ + "Advanced Features", + "Sequence Parallelism" + ] }, { "objectID": "docs/sequence_parallelism.html#configuration", "href": "docs/sequence_parallelism.html#configuration", "title": "Sequence Parallelism", "section": "", - "text": "To enable sequence parallelism, add the following to your configuration file:\n# Set to a divisor (> 1) of the number of GPUs available\nsequence_parallel_degree: 4 # Split sequences across 4 GPUs\nThe sequence_parallel_degree should be a divisor of the total number of GPUs. For example:\n\nWith 8 GPUs, valid values would be 2, 4, or 8\nWith 4 GPUs, valid values would be 2 or 4" + "text": "To enable sequence parallelism, add the following to your configuration file:\n# Set to a divisor (> 1) of the number of GPUs available\nsequence_parallel_degree: 4 # Split sequences across 4 GPUs\n# Optional; strides across the key dimension. Larger values use more memory but should make training faster.\nheads_k_stride: 1\nThe sequence_parallel_degree should be a divisor of the total number of GPUs. 
For example:\n\nWith 8 GPUs, valid values would be 2, 4, or 8\nWith 4 GPUs, valid values would be 2 or 4", + "crumbs": [ + "Advanced Features", + "Sequence Parallelism" + ] }, { "objectID": "docs/sequence_parallelism.html#implementation-details", "href": "docs/sequence_parallelism.html#implementation-details", "title": "Sequence Parallelism", "section": "", - "text": "When sequence parallelism is enabled:\n\nEach sequence is divided into equal chunks across the GPUs in a sequence parallel group\nThe data collator handles the chunking of input_ids, attention_mask, labels, and position_ids\nPosition IDs are adjusted to maintain proper relative positions, especially for packed sequences\nThe trainer uses special ring communication patterns for attention operations" + "text": "When sequence parallelism is enabled:\n\nEach sequence is divided into equal chunks across the GPUs in a sequence parallel group\nThe data collator handles the chunking of input_ids, attention_mask, labels, and position_ids\nPosition IDs are adjusted to maintain proper relative positions, especially for packed sequences\nThe trainer uses special ring communication patterns for attention operations", + "crumbs": [ + "Advanced Features", + "Sequence Parallelism" + ] }, { "objectID": "docs/sequence_parallelism.html#requirements", "href": "docs/sequence_parallelism.html#requirements", "title": "Sequence Parallelism", "section": "", - "text": "To use sequence parallelism, you need:\n\nMultiple GPUs (at least 2)\nThe ring-flash-attn package. Install with:\n\npip install axolotl[ring-flash-attn] (preferred)\npip install ring-flash-attn>=0.1.4" + "text": "To use sequence parallelism, you need:\n\nMultiple GPUs (at least 2)\nThe ring-flash-attn package. Install with:\n\npip install axolotl[ring-flash-attn] (preferred)\npip install ring-flash-attn>=0.1.4", + "crumbs": [ + "Advanced Features", + "Sequence Parallelism" + ] }, { "objectID": "docs/sequence_parallelism.html#limitations", "href": "docs/sequence_parallelism.html#limitations", "title": "Sequence Parallelism", "section": "", - "text": "Flash attention must be enabled for this to work (flash_attention: true in config YAML)\nMay have a small performance overhead due to communication between GPUs" + "text": "Flash attention must be enabled for this to work (flash_attention: true in config YAML)\nMay have a small performance overhead due to communication between GPUs", + "crumbs": [ + "Advanced Features", + "Sequence Parallelism" + ] }, { "objectID": "docs/sequence_parallelism.html#example", "href": "docs/sequence_parallelism.html#example", "title": "Sequence Parallelism", "section": "", - "text": "# Example config with sequence parallelism\nbase_model: meta-llama/Llama-3-8B-Instruct\nsequence_len: 8192\nsequence_parallel_degree: 2 # Split each sequence into 4 parts\nflash_attention: true # Required with sequence parallelism\n...\nThis will train the Llama 3 8B model with 8K context length, with each sequence split\ninto 2 subsequences of length 4096 across 2 GPUs." + "text": "base_model: meta-llama/Llama-3-8B-Instruct\nsequence_len: 8192\n\n...\n\nsequence_parallel_degree: 4 # Split each sequence into 4 parts, one per GPU\nflash_attention: true # Required with sequence parallelism\n# Optional; strides across the key dimension. 
Larger values use more memory but should make training faster.\nheads_k_stride: 1\n\n...\nThis will train the Llama 3 8B model with 8K context length, with each sequence split\ninto 4 subsequences of length 2048 across 4 GPUs.",
    "crumbs": [
      "Advanced Features",
      "Sequence Parallelism"
    ]
  },
  {
    "objectID": "docs/sequence_parallelism.html#sample-packing-with-sequence-parallelism",
    "href": "docs/sequence_parallelism.html#sample-packing-with-sequence-parallelism",
    "title": "Sequence Parallelism",
    "section": "",
    "text": "Sequence parallelism is compatible with Axolotl’s sample packing functionality. When using both features together:\n\nSamples are first packed together\nThe packed sequences are then divided across GPUs in the sequence parallel group\nPosition IDs are automatically adjusted to maintain proper relative positions",
    "crumbs": [
      "Advanced Features",
      "Sequence Parallelism"
    ]
  },
  {
    "objectID": "docs/sequence_parallelism.html#effect-on-batch-size",
    "href": "docs/sequence_parallelism.html#effect-on-batch-size",
    "title": "Sequence Parallelism",
    "section": "",
    "text": "When using sequence parallelism, your effective global batch size is divided by the sequence_parallel_degree.
This happens because:\n\nEach group of sequence_parallel_degree GPUs works on the same batch (just different parts of each sequence)\nThe number of batches processed per step decreases\n\nFor example:\n- With 8 GPUs and no sequence parallelism: 8 different batches processed per step\n- With 8 GPUs and sequence_parallel_degree=4: Only 2 different batches processed per step (each split across 4 GPUs)\n- If your per-GPU micro_batch_size is 2, the global batch size decreases from 16 to 4", + "crumbs": [ + "Advanced Features", + "Sequence Parallelism" + ] }, { "objectID": "docs/multipack.html", diff --git a/sitemap.xml b/sitemap.xml index 5f8c05b17..f34779672 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -2,674 +2,674 @@ https://axolotl-ai-cloud.github.io/axolotl/examples/colab-notebooks/colab-axolotl-example.html - 2025-03-31T06:40:26.194Z + 2025-03-31T13:13:55.601Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/stepwise_supervised.html - 2025-03-31T06:40:26.190Z + 2025-03-31T13:13:55.597Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/template_free.html - 2025-03-31T06:40:26.190Z + 2025-03-31T13:13:55.597Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/tokenized.html - 2025-03-31T06:40:26.190Z + 2025-03-31T13:13:55.597Z https://axolotl-ai-cloud.github.io/axolotl/docs/nccl.html - 2025-03-31T06:40:26.193Z + 2025-03-31T13:13:55.600Z https://axolotl-ai-cloud.github.io/axolotl/docs/amd_hpc.html - 2025-03-31T06:40:26.189Z + 2025-03-31T13:13:55.596Z https://axolotl-ai-cloud.github.io/axolotl/docs/config.html - 2025-03-31T06:40:26.189Z + 2025-03-31T13:13:55.596Z https://axolotl-ai-cloud.github.io/axolotl/docs/multi-gpu.html - 2025-03-31T06:40:26.193Z + 2025-03-31T13:13:55.600Z https://axolotl-ai-cloud.github.io/axolotl/docs/installation.html - 2025-03-31T06:40:26.193Z + 2025-03-31T13:13:55.600Z https://axolotl-ai-cloud.github.io/axolotl/docs/torchao.html - 2025-03-31T06:40:26.193Z + 2025-03-31T13:13:55.600Z https://axolotl-ai-cloud.github.io/axolotl/docs/reward_modelling.html - 2025-03-31T06:40:26.193Z + 2025-03-31T13:13:55.600Z https://axolotl-ai-cloud.github.io/axolotl/docs/input_output.html - 2025-03-31T06:40:26.193Z + 2025-03-31T13:13:55.600Z https://axolotl-ai-cloud.github.io/axolotl/docs/multimodal.html - 2025-03-31T06:40:26.193Z + 2025-03-31T13:13:55.600Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.callbacks.mlflow_.html - 2025-03-31T06:40:57.018Z + 2025-03-31T13:14:44.106Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.trainer_fsdp_optim.html - 2025-03-31T06:40:56.618Z + 2025-03-31T13:14:43.710Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.data.batch_dataset_fetcher.html - 2025-03-31T06:40:56.634Z + 2025-03-31T13:14:43.726Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.stepwise_supervised.html - 2025-03-31T06:40:56.328Z + 2025-03-31T13:14:43.422Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.mistral_attn_hijack_flash.html - 2025-03-31T06:40:56.567Z + 2025-03-31T13:14:43.660Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.dpo.user_defined.html - 2025-03-31T06:40:56.374Z + 2025-03-31T13:14:43.468Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/integrations.liger.args.html - 2025-03-31T06:40:56.936Z + 2025-03-31T13:14:44.025Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.training.html - 2025-03-31T06:40:56.800Z + 2025-03-31T13:14:43.891Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/datasets.html - 
2025-03-31T06:40:55.838Z + 2025-03-31T13:14:42.935Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/kernels.geglu.html - 2025-03-31T06:40:56.508Z + 2025-03-31T13:14:43.601Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.llama_attn_hijack_flash.html - 2025-03-31T06:40:56.551Z + 2025-03-31T13:14:43.644Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.sweeps.html - 2025-03-31T06:40:56.164Z + 2025-03-31T13:14:43.259Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.freeze.html - 2025-03-31T06:40:56.705Z + 2025-03-31T13:14:43.796Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.multipack.html - 2025-03-31T06:40:56.569Z + 2025-03-31T13:14:43.661Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.main.html - 2025-03-31T06:40:56.064Z + 2025-03-31T13:14:43.160Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.trainers.trl.html - 2025-03-31T06:40:56.239Z + 2025-03-31T13:14:43.333Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.dpo.passthrough.html - 2025-03-31T06:40:56.376Z + 2025-03-31T13:14:43.470Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.chat.format.llama3x.html - 2025-03-31T06:40:56.019Z + 2025-03-31T13:14:43.116Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.datasets.transforms.chat_builder.html - 2025-03-31T06:40:56.034Z + 2025-03-31T13:14:43.130Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.kto.user_defined.html - 2025-03-31T06:40:56.393Z + 2025-03-31T13:14:43.487Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.collators.mamba.html - 2025-03-31T06:40:56.993Z + 2025-03-31T13:14:44.081Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/integrations.base.html - 2025-03-31T06:40:56.921Z + 2025-03-31T13:14:44.010Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.bench.html - 2025-03-31T06:40:56.697Z + 2025-03-31T13:14:43.788Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/kernels.swiglu.html - 2025-03-31T06:40:56.517Z + 2025-03-31T13:14:43.611Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.chat.format.shared.html - 2025-03-31T06:40:56.021Z + 2025-03-31T13:14:43.118Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/integrations.cut_cross_entropy.args.html - 2025-03-31T06:40:56.924Z + 2025-03-31T13:14:44.013Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.datasets.chat.html - 2025-03-31T06:40:56.026Z + 2025-03-31T13:14:43.123Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.callbacks.lisa.html - 2025-03-31T06:40:57.014Z + 2025-03-31T13:14:44.102Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/integrations.grokfast.optimizer.html - 2025-03-31T06:40:56.925Z + 2025-03-31T13:14:44.014Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.alpaca_chat.html - 2025-03-31T06:40:56.278Z + 2025-03-31T13:14:43.372Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.alpaca_instruct.html - 2025-03-31T06:40:56.280Z + 2025-03-31T13:14:43.374Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.kto.chatml.html - 2025-03-31T06:40:56.392Z + 2025-03-31T13:14:43.486Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.integrations.html - 2025-03-31T06:40:56.846Z + 2025-03-31T13:14:43.936Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.trl.html - 2025-03-31T06:40:56.829Z + 2025-03-31T13:14:43.919Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_tokenizers.html - 2025-03-31T06:40:55.892Z + 2025-03-31T13:14:42.989Z 
https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.data.sft.html - 2025-03-31T06:40:56.778Z + 2025-03-31T13:14:43.868Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schedulers.html - 2025-03-31T06:40:56.746Z + 2025-03-31T13:14:43.836Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.chat_templates.html - 2025-03-31T06:40:56.680Z + 2025-03-31T13:14:43.772Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.models.html - 2025-03-31T06:40:56.664Z + 2025-03-31T13:14:43.756Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.dpo.chatml.html - 2025-03-31T06:40:56.371Z + 2025-03-31T13:14:43.465Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.distributed.html - 2025-03-31T06:40:56.764Z + 2025-03-31T13:14:43.855Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.utils.html - 2025-03-31T06:40:56.607Z + 2025-03-31T13:14:43.699Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.utils.html - 2025-03-31T06:40:56.858Z + 2025-03-31T13:14:43.948Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.llama_expand_mask.html - 2025-03-31T06:40:56.577Z + 2025-03-31T13:14:43.669Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/common.datasets.html - 2025-03-31T06:40:56.961Z + 2025-03-31T13:14:44.050Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/logging_config.html - 2025-03-31T06:40:55.897Z + 2025-03-31T13:14:42.994Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/kernels.quantize.html - 2025-03-31T06:40:56.525Z + 2025-03-31T13:14:43.618Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.llama_patch_multipack.html - 2025-03-31T06:40:56.609Z + 2025-03-31T13:14:43.702Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.callbacks.comet_.html - 2025-03-31T06:40:57.021Z + 2025-03-31T13:14:44.109Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.trainer.html - 2025-03-31T06:40:56.721Z + 2025-03-31T13:14:43.813Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/common.architectures.html - 2025-03-31T06:40:56.944Z + 2025-03-31T13:14:44.033Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/models.mamba.modeling_mamba.html - 2025-03-31T06:40:56.962Z + 2025-03-31T13:14:44.051Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/integrations.spectrum.args.html - 2025-03-31T06:40:56.942Z + 2025-03-31T13:14:44.031Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.merge_sharded_fsdp_weights.html - 2025-03-31T06:40:56.151Z + 2025-03-31T13:14:43.245Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.bradley_terry.llama3.html - 2025-03-31T06:40:56.417Z + 2025-03-31T13:14:43.511Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.merge_lora.html - 2025-03-31T06:40:56.139Z + 2025-03-31T13:14:43.234Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.lora.html - 2025-03-31T06:40:56.685Z + 2025-03-31T13:14:43.777Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.relora.html - 2025-03-31T06:40:56.575Z + 2025-03-31T13:14:43.668Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.cloud.base.html - 2025-03-31T06:40:56.198Z + 2025-03-31T13:14:43.293Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/common.const.html - 2025-03-31T06:40:56.945Z + 2025-03-31T13:14:44.034Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/convert.html - 2025-03-31T06:40:55.851Z + 2025-03-31T13:14:42.948Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.chat_template.html - 2025-03-31T06:40:56.265Z + 
2025-03-31T13:14:43.359Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/kernels.utils.html - 2025-03-31T06:40:56.526Z + 2025-03-31T13:14:43.619Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.lora_embeddings.html - 2025-03-31T06:40:56.688Z + 2025-03-31T13:14:43.780Z https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html - 2025-03-31T06:40:26.193Z + 2025-03-31T13:13:55.600Z https://axolotl-ai-cloud.github.io/axolotl/docs/batch_vs_grad.html - 2025-03-31T06:40:26.189Z + 2025-03-31T13:13:55.596Z https://axolotl-ai-cloud.github.io/axolotl/docs/faq.html - 2025-03-31T06:40:26.190Z + 2025-03-31T13:13:55.597Z https://axolotl-ai-cloud.github.io/axolotl/docs/debugging.html - 2025-03-31T06:40:26.190Z + 2025-03-31T13:13:55.597Z https://axolotl-ai-cloud.github.io/axolotl/docs/lr_groups.html - 2025-03-31T06:40:26.193Z + 2025-03-31T13:13:55.600Z https://axolotl-ai-cloud.github.io/axolotl/TODO.html - 2025-03-31T06:40:26.188Z + 2025-03-31T13:13:55.595Z https://axolotl-ai-cloud.github.io/axolotl/src/axolotl/integrations/LICENSE.html - 2025-03-31T06:40:26.208Z + 2025-03-31T13:13:55.616Z https://axolotl-ai-cloud.github.io/axolotl/index.html - 2025-03-31T06:40:26.205Z + 2025-03-31T13:13:55.612Z https://axolotl-ai-cloud.github.io/axolotl/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html - 2025-03-31T06:40:26.209Z + 2025-03-31T13:13:55.616Z https://axolotl-ai-cloud.github.io/axolotl/FAQS.html - 2025-03-31T06:40:26.188Z + 2025-03-31T13:13:55.595Z https://axolotl-ai-cloud.github.io/axolotl/docs/multi-node.html - 2025-03-31T06:40:26.193Z + 2025-03-31T13:13:55.600Z https://axolotl-ai-cloud.github.io/axolotl/docs/sequence_parallelism.html - 2025-03-31T06:40:26.193Z + 2025-03-31T13:13:55.600Z https://axolotl-ai-cloud.github.io/axolotl/docs/multipack.html - 2025-03-31T06:40:26.193Z + 2025-03-31T13:13:55.600Z https://axolotl-ai-cloud.github.io/axolotl/docs/inference.html - 2025-03-31T06:40:26.192Z + 2025-03-31T13:13:55.600Z https://axolotl-ai-cloud.github.io/axolotl/docs/getting-started.html - 2025-03-31T06:40:26.190Z + 2025-03-31T13:13:55.597Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.callbacks.perplexity.html - 2025-03-31T06:40:57.009Z + 2025-03-31T13:14:44.097Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.trainer_builder.html - 2025-03-31T06:40:55.912Z + 2025-03-31T13:14:43.009Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.train.html - 2025-03-31T06:40:56.072Z + 2025-03-31T13:14:43.168Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.dpo.llama3.html - 2025-03-31T06:40:56.361Z + 2025-03-31T13:14:43.455Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.cloud.modal_.html - 2025-03-31T06:40:56.205Z + 2025-03-31T13:14:43.299Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/index.html - 2025-03-31T06:40:55.760Z + 2025-03-31T13:14:42.857Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.input_output.html - 2025-03-31T06:40:56.324Z + 2025-03-31T13:14:43.418Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.optimizers.adopt.html - 2025-03-31T06:40:56.775Z + 2025-03-31T13:14:43.865Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.btlm_attn_hijack_flash.html - 2025-03-31T06:40:56.608Z + 2025-03-31T13:14:43.700Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.collators.core.html - 2025-03-31T06:40:56.964Z + 2025-03-31T13:14:44.052Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.datasets.html - 2025-03-31T06:40:56.818Z + 2025-03-31T13:14:43.908Z 
https://axolotl-ai-cloud.github.io/axolotl/docs/api/integrations.kd.trainer.html - 2025-03-31T06:40:56.933Z + 2025-03-31T13:14:44.022Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.tokenization.html - 2025-03-31T06:40:56.670Z + 2025-03-31T13:14:43.762Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.mixtral.html - 2025-03-31T06:40:56.636Z + 2025-03-31T13:14:43.727Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.stablelm_attn_hijack_flash.html - 2025-03-31T06:40:56.615Z + 2025-03-31T13:14:43.707Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.model.html - 2025-03-31T06:40:56.796Z + 2025-03-31T13:14:43.886Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.multimodal.html - 2025-03-31T06:40:56.834Z + 2025-03-31T13:14:43.924Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.gradient_checkpointing.unsloth.html - 2025-03-31T06:40:56.781Z + 2025-03-31T13:14:43.871Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.trainers.base.html - 2025-03-31T06:40:56.222Z + 2025-03-31T13:14:43.316Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.unsloth_.html - 2025-03-31T06:40:56.626Z + 2025-03-31T13:14:43.718Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.samplers.multipack.html - 2025-03-31T06:40:57.003Z + 2025-03-31T13:14:44.091Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.callbacks.profiler.html - 2025-03-31T06:40:57.013Z + 2025-03-31T13:14:44.101Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/integrations.lm_eval.args.html - 2025-03-31T06:40:56.939Z + 2025-03-31T13:14:44.028Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.data.pretraining.html - 2025-03-31T06:40:56.777Z + 2025-03-31T13:14:43.867Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/evaluate.html - 2025-03-31T06:40:55.830Z + 2025-03-31T13:14:42.927Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.dict.html - 2025-03-31T06:40:56.768Z + 2025-03-31T13:14:43.858Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.utils.html - 2025-03-31T06:40:56.195Z + 2025-03-31T13:14:43.290Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.pygmalion.html - 2025-03-31T06:40:56.345Z + 2025-03-31T13:14:43.440Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.training_args.html - 2025-03-31T06:40:55.995Z + 2025-03-31T13:14:43.091Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.inference.html - 2025-03-31T06:40:56.131Z + 2025-03-31T13:14:43.226Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/kernels.lora.html - 2025-03-31T06:40:56.497Z + 2025-03-31T13:14:43.590Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.evaluate.html - 2025-03-31T06:40:56.080Z + 2025-03-31T13:14:43.176Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.collators.batching.html - 2025-03-31T06:40:56.989Z + 2025-03-31T13:14:44.078Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.completion.html - 2025-03-31T06:40:56.318Z + 2025-03-31T13:14:43.412Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.dpo.zephyr.html - 2025-03-31T06:40:56.373Z + 2025-03-31T13:14:43.467Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.metharme.html - 2025-03-31T06:40:56.335Z + 2025-03-31T13:14:43.429Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.orpo.chat_template.html - 2025-03-31T06:40:56.413Z + 2025-03-31T13:14:43.507Z 
https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.alpaca_w_system.html - 2025-03-31T06:40:56.291Z + 2025-03-31T13:14:43.385Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.model_shard_quant.html - 2025-03-31T06:40:56.694Z + 2025-03-31T13:14:43.785Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.config.html - 2025-03-31T06:40:56.117Z + 2025-03-31T13:14:43.212Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.enums.html - 2025-03-31T06:40:56.853Z + 2025-03-31T13:14:43.943Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.preprocess.html - 2025-03-31T06:40:56.159Z + 2025-03-31T13:14:43.253Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.chat.messages.html - 2025-03-31T06:40:56.017Z + 2025-03-31T13:14:43.113Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.dpo.chat_template.html - 2025-03-31T06:40:56.351Z + 2025-03-31T13:14:43.445Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.peft.html - 2025-03-31T06:40:56.826Z + 2025-03-31T13:14:43.916Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/train.html - 2025-03-31T06:40:55.820Z + 2025-03-31T13:14:42.917Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.messages.chat.html - 2025-03-31T06:40:56.350Z + 2025-03-31T13:14:43.444Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.orcamini.html - 2025-03-31T06:40:56.339Z + 2025-03-31T13:14:43.433Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.collators.mm_chat.html - 2025-03-31T06:40:56.997Z + 2025-03-31T13:14:44.086Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.kto.llama3.html - 2025-03-31T06:40:56.384Z + 2025-03-31T13:14:43.478Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.attention.mllama.html - 2025-03-31T06:40:56.633Z + 2025-03-31T13:14:43.724Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.checks.html - 2025-03-31T06:40:56.100Z + 2025-03-31T13:14:43.195Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.transformers_fa_utils.html - 2025-03-31T06:40:56.625Z + 2025-03-31T13:14:43.716Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.llama_attn_hijack_xformers.html - 2025-03-31T06:40:56.553Z + 2025-03-31T13:14:43.646Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.trainers.dpo.trainer.html - 2025-03-31T06:40:56.245Z + 2025-03-31T13:14:43.339Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.user_defined.html - 2025-03-31T06:40:56.299Z + 2025-03-31T13:14:43.393Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/cli.args.html - 2025-03-31T06:40:56.094Z + 2025-03-31T13:14:43.189Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.llama2_chat.html - 2025-03-31T06:40:56.312Z + 2025-03-31T13:14:43.406Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/utils.schemas.config.html - 2025-03-31T06:40:56.789Z + 2025-03-31T13:14:43.879Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.trainers.grpo.trainer.html - 2025-03-31T06:40:56.249Z + 2025-03-31T13:14:43.343Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/core.chat.format.chatml.html - 2025-03-31T06:40:56.018Z + 2025-03-31T13:14:43.115Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/monkeypatch.lora_kernels.html - 2025-03-31T06:40:56.599Z + 2025-03-31T13:14:43.691Z https://axolotl-ai-cloud.github.io/axolotl/docs/api/prompt_strategies.base.html - 2025-03-31T06:40:56.250Z + 2025-03-31T13:14:43.344Z 
https://axolotl-ai-cloud.github.io/axolotl/docs/rlhf.html - 2025-03-31T06:40:26.193Z + 2025-03-31T13:13:55.600Z https://axolotl-ai-cloud.github.io/axolotl/docs/cli.html - 2025-03-31T06:40:26.189Z + 2025-03-31T13:13:55.596Z https://axolotl-ai-cloud.github.io/axolotl/docs/unsloth.html - 2025-03-31T06:40:26.193Z + 2025-03-31T13:13:55.600Z https://axolotl-ai-cloud.github.io/axolotl/docs/fsdp_qlora.html - 2025-03-31T06:40:26.190Z + 2025-03-31T13:13:55.597Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset_preprocessing.html - 2025-03-31T06:40:26.190Z + 2025-03-31T13:13:55.597Z https://axolotl-ai-cloud.github.io/axolotl/docs/custom_integrations.html - 2025-03-31T06:40:26.189Z + 2025-03-31T13:13:55.596Z https://axolotl-ai-cloud.github.io/axolotl/docs/mac.html - 2025-03-31T06:40:26.193Z + 2025-03-31T13:13:55.600Z https://axolotl-ai-cloud.github.io/axolotl/docs/docker.html - 2025-03-31T06:40:26.190Z + 2025-03-31T13:13:55.597Z https://axolotl-ai-cloud.github.io/axolotl/docs/ray-integration.html - 2025-03-31T06:40:26.193Z + 2025-03-31T13:13:55.600Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/index.html - 2025-03-31T06:40:26.189Z + 2025-03-31T13:13:55.597Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/conversation.html - 2025-03-31T06:40:26.189Z + 2025-03-31T13:13:55.596Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/pretraining.html - 2025-03-31T06:40:26.189Z + 2025-03-31T13:13:55.597Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/inst_tune.html - 2025-03-31T06:40:26.189Z + 2025-03-31T13:13:55.597Z diff --git a/src/axolotl/integrations/LICENSE.html b/src/axolotl/integrations/LICENSE.html index aaf5a2033..3daa1aca2 100644 --- a/src/axolotl/integrations/LICENSE.html +++ b/src/axolotl/integrations/LICENSE.html @@ -356,6 +356,12 @@ ul.task-list li input[type="checkbox"] { Custom Integrations + + diff --git a/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html b/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html index e82b1021a..9f95dd470 100644 --- a/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html +++ b/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html @@ -356,6 +356,12 @@ ul.task-list li input[type="checkbox"] { Custom Integrations + +
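For reference, the batch-size arithmetic from the sequence parallelism docs above, as a hedged config sketch (assuming an 8-GPU node; values are illustrative, not defaults):

micro_batch_size: 2            # per-GPU batch size
gradient_accumulation_steps: 1
sequence_parallel_degree: 4    # the 8 GPUs form 8 / 4 = 2 sequence-parallel groups

# Effective global batch size = micro_batch_size * (num_gpus / sequence_parallel_degree)
#                             = 2 * (8 / 4) = 4, versus 2 * 8 = 16 without sequence parallelism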