From 9564d8f7c6a1a1fa41ae86684ea6820b31c4f9f2 Mon Sep 17 00:00:00 2001 From: Quarto GHA Workflow Runner Date: Tue, 15 Jul 2025 00:16:43 +0000 Subject: [PATCH] Built site for gh-pages --- .nojekyll | 2 +- FAQS.html | 6 + TODO.html | 6 + docs/amd_hpc.html | 6 + docs/api/cli.args.html | 6 + docs/api/cli.checks.html | 6 + docs/api/cli.cloud.base.html | 6 + docs/api/cli.cloud.modal_.html | 6 + docs/api/cli.config.html | 6 + docs/api/cli.evaluate.html | 6 + docs/api/cli.inference.html | 6 + docs/api/cli.main.html | 6 + docs/api/cli.merge_lora.html | 6 + docs/api/cli.merge_sharded_fsdp_weights.html | 6 + docs/api/cli.preprocess.html | 6 + docs/api/cli.quantize.html | 6 + docs/api/cli.sweeps.html | 6 + docs/api/cli.train.html | 6 + docs/api/cli.utils.html | 6 + docs/api/cli.vllm_serve.html | 6 + docs/api/common.architectures.html | 6 + docs/api/common.const.html | 6 + docs/api/common.datasets.html | 6 + docs/api/convert.html | 6 + docs/api/core.builders.base.html | 6 + docs/api/core.builders.causal.html | 6 + docs/api/core.builders.rl.html | 6 + docs/api/core.chat.format.chatml.html | 6 + docs/api/core.chat.format.llama3x.html | 6 + docs/api/core.chat.format.shared.html | 6 + docs/api/core.chat.messages.html | 6 + docs/api/core.datasets.chat.html | 6 + ...core.datasets.transforms.chat_builder.html | 6 + docs/api/core.trainers.base.html | 6 + docs/api/core.trainers.dpo.trainer.html | 6 + docs/api/core.trainers.grpo.sampler.html | 6 + docs/api/core.trainers.grpo.trainer.html | 6 + docs/api/core.trainers.mamba.html | 6 + docs/api/core.trainers.mixins.optimizer.html | 6 + ...core.trainers.mixins.rng_state_loader.html | 6 + docs/api/core.trainers.mixins.scheduler.html | 6 + docs/api/core.trainers.relora.html | 6 + docs/api/core.trainers.trl.html | 6 + docs/api/core.trainers.utils.html | 6 + docs/api/core.training_args.html | 6 + docs/api/datasets.html | 6 + docs/api/evaluate.html | 6 + docs/api/index.html | 6 + docs/api/integrations.base.html | 6 + .../integrations.cut_cross_entropy.args.html | 6 + docs/api/integrations.grokfast.optimizer.html | 6 + docs/api/integrations.kd.trainer.html | 6 + docs/api/integrations.liger.args.html | 6 + docs/api/integrations.lm_eval.args.html | 6 + docs/api/integrations.spectrum.args.html | 6 + docs/api/kernels.geglu.html | 6 + docs/api/kernels.lora.html | 6 + docs/api/kernels.quantize.html | 6 + docs/api/kernels.swiglu.html | 6 + docs/api/kernels.utils.html | 6 + docs/api/loaders.adapter.html | 6 + docs/api/loaders.constants.html | 6 + docs/api/loaders.model.html | 6 + docs/api/loaders.patch_manager.html | 6 + docs/api/loaders.processor.html | 6 + docs/api/loaders.tokenizer.html | 6 + docs/api/logging_config.html | 6 + docs/api/models.mamba.modeling_mamba.html | 6 + .../monkeypatch.btlm_attn_hijack_flash.html | 6 + ...onkeypatch.data.batch_dataset_fetcher.html | 6 + ...ch.gradient_checkpointing.offload_cpu.html | 18 +- ...h.gradient_checkpointing.offload_disk.html | 6 + .../monkeypatch.llama_attn_hijack_flash.html | 6 + ...onkeypatch.llama_attn_hijack_xformers.html | 6 + docs/api/monkeypatch.llama_expand_mask.html | 6 + .../monkeypatch.llama_patch_multipack.html | 6 + docs/api/monkeypatch.lora_kernels.html | 6 + ...monkeypatch.mistral_attn_hijack_flash.html | 6 + docs/api/monkeypatch.mixtral.html | 6 + docs/api/monkeypatch.multipack.html | 6 + docs/api/monkeypatch.relora.html | 6 + ...onkeypatch.stablelm_attn_hijack_flash.html | 6 + docs/api/monkeypatch.trainer_fsdp_optim.html | 6 + .../monkeypatch.transformers_fa_utils.html | 6 + docs/api/monkeypatch.unsloth_.html | 6 
+ docs/api/monkeypatch.utils.html | 6 + docs/api/prompt_strategies.alpaca_chat.html | 6 + .../prompt_strategies.alpaca_instruct.html | 6 + .../prompt_strategies.alpaca_w_system.html | 6 + docs/api/prompt_strategies.base.html | 6 + ...rompt_strategies.bradley_terry.llama3.html | 6 + docs/api/prompt_strategies.chat_template.html | 6 + docs/api/prompt_strategies.completion.html | 6 + .../prompt_strategies.dpo.chat_template.html | 6 + docs/api/prompt_strategies.dpo.chatml.html | 6 + docs/api/prompt_strategies.dpo.llama3.html | 6 + .../prompt_strategies.dpo.passthrough.html | 6 + .../prompt_strategies.dpo.user_defined.html | 6 + docs/api/prompt_strategies.dpo.zephyr.html | 6 + docs/api/prompt_strategies.input_output.html | 6 + docs/api/prompt_strategies.kto.chatml.html | 6 + docs/api/prompt_strategies.kto.llama3.html | 6 + .../prompt_strategies.kto.user_defined.html | 6 + docs/api/prompt_strategies.llama2_chat.html | 6 + docs/api/prompt_strategies.messages.chat.html | 6 + docs/api/prompt_strategies.metharme.html | 6 + docs/api/prompt_strategies.orcamini.html | 6 + .../prompt_strategies.orpo.chat_template.html | 6 + docs/api/prompt_strategies.pygmalion.html | 6 + ...prompt_strategies.stepwise_supervised.html | 6 + docs/api/prompt_strategies.user_defined.html | 6 + docs/api/prompt_tokenizers.html | 6 + docs/api/train.html | 6 + docs/api/utils.bench.html | 6 + docs/api/utils.callbacks.comet_.html | 6 + docs/api/utils.callbacks.lisa.html | 6 + docs/api/utils.callbacks.mlflow_.html | 6 + docs/api/utils.callbacks.perplexity.html | 6 + docs/api/utils.callbacks.profiler.html | 11 +- docs/api/utils.callbacks.qat.html | 6 + docs/api/utils.chat_templates.html | 6 + docs/api/utils.collators.batching.html | 6 + docs/api/utils.collators.core.html | 6 + docs/api/utils.collators.mamba.html | 6 + docs/api/utils.collators.mm_chat.html | 6 + .../utils.ctx_managers.sequence_parallel.html | 6 + docs/api/utils.data.pretraining.html | 6 + docs/api/utils.data.sft.html | 6 + docs/api/utils.dict.html | 6 + docs/api/utils.distributed.html | 6 + docs/api/utils.freeze.html | 6 + docs/api/utils.lora.html | 6 + docs/api/utils.model_shard_quant.html | 6 + docs/api/utils.optimizers.adopt.html | 6 + docs/api/utils.quantization.html | 6 + docs/api/utils.samplers.multipack.html | 6 + docs/api/utils.schedulers.html | 6 + docs/api/utils.schemas.config.html | 6 + docs/api/utils.schemas.datasets.html | 6 + docs/api/utils.schemas.enums.html | 6 + docs/api/utils.schemas.integrations.html | 6 + docs/api/utils.schemas.model.html | 6 + docs/api/utils.schemas.multimodal.html | 6 + docs/api/utils.schemas.peft.html | 6 + docs/api/utils.schemas.training.html | 6 + docs/api/utils.schemas.trl.html | 6 + docs/api/utils.schemas.utils.html | 6 + docs/api/utils.tokenization.html | 6 + docs/api/utils.trainer.html | 6 + docs/batch_vs_grad.html | 6 + docs/cli.html | 6 + docs/config-reference.html | 1158 +++++++++-------- docs/custom_integrations.html | 6 + docs/dataset-formats/conversation.html | 6 + docs/dataset-formats/index.html | 6 + docs/dataset-formats/inst_tune.html | 6 + docs/dataset-formats/pretraining.html | 6 + docs/dataset-formats/stepwise_supervised.html | 6 + docs/dataset-formats/template_free.html | 6 + docs/dataset-formats/tokenized.html | 6 + docs/dataset_loading.html | 6 + docs/dataset_preprocessing.html | 6 + docs/debugging.html | 6 + docs/docker.html | 6 + docs/faq.html | 6 + docs/fsdp_qlora.html | 6 + docs/getting-started.html | 6 + docs/gradient_checkpointing.html | 928 +++++++++++++ docs/inference.html | 6 + 
docs/input_output.html | 6 + docs/installation.html | 6 + docs/lora_optims.html | 6 + docs/lr_groups.html | 6 + docs/mac.html | 6 + docs/multi-gpu.html | 6 + docs/multi-node.html | 6 + docs/multimodal.html | 6 + docs/multipack.html | 6 + docs/nccl.html | 6 + docs/qat.html | 6 + docs/quantize.html | 6 + docs/ray-integration.html | 6 + docs/reward_modelling.html | 6 + docs/rlhf.html | 6 + docs/sequence_parallelism.html | 6 + docs/torchao.html | 6 + docs/unsloth.html | 6 + .../colab-axolotl-example.html | 6 + index.html | 6 + search.json | 85 +- sitemap.xml | 402 +++--- src/axolotl/integrations/LICENSE.html | 6 + .../cut_cross_entropy/ACKNOWLEDGEMENTS.html | 6 + 193 files changed, 2897 insertions(+), 823 deletions(-) create mode 100644 docs/gradient_checkpointing.html diff --git a/.nojekyll b/.nojekyll index 2cc07f96c..6bf3b3130 100644 --- a/.nojekyll +++ b/.nojekyll @@ -1 +1 @@ -516dd7c4 \ No newline at end of file +4dbb58e6 \ No newline at end of file diff --git a/FAQS.html b/FAQS.html index d1ce8783d..78f29e97f 100644 --- a/FAQS.html +++ b/FAQS.html @@ -390,6 +390,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/TODO.html b/TODO.html index 25661c7b3..cfbad5764 100644 --- a/TODO.html +++ b/TODO.html @@ -390,6 +390,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/amd_hpc.html b/docs/amd_hpc.html index 5ba65e461..767bf66f7 100644 --- a/docs/amd_hpc.html +++ b/docs/amd_hpc.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/api/cli.args.html b/docs/api/cli.args.html index fa5e8454f..19e09f27e 100644 --- a/docs/api/cli.args.html +++ b/docs/api/cli.args.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/api/cli.checks.html b/docs/api/cli.checks.html index 10d377250..2eb10a5a3 100644 --- a/docs/api/cli.checks.html +++ b/docs/api/cli.checks.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/api/cli.cloud.base.html b/docs/api/cli.cloud.base.html index 97b51f73a..50551c6a8 100644 --- a/docs/api/cli.cloud.base.html +++ b/docs/api/cli.cloud.base.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/api/cli.cloud.modal_.html b/docs/api/cli.cloud.modal_.html index f73e5e5e7..a781896a2 100644 --- a/docs/api/cli.cloud.modal_.html +++ b/docs/api/cli.cloud.modal_.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/api/cli.config.html b/docs/api/cli.config.html index 7ce649ae0..bbc7def09 100644 --- a/docs/api/cli.config.html +++ b/docs/api/cli.config.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/api/cli.evaluate.html b/docs/api/cli.evaluate.html index 264e66de4..88164452c 100644 --- a/docs/api/cli.evaluate.html +++ b/docs/api/cli.evaluate.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/api/cli.inference.html b/docs/api/cli.inference.html index 21f2ba8b2..313c94064 100644 --- a/docs/api/cli.inference.html +++ b/docs/api/cli.inference.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/api/cli.main.html 
b/docs/api/cli.main.html index e05d5f57c..d9b4b3f2b 100644
--- a/docs/api/cli.main.html
+++ b/docs/api/cli.main.html
@@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + +
[... the same six-line sidebar hunk, inserted after the "Sequence Parallelism" nav entry, repeats for each page from docs/api/cli.merge_lora.html through docs/api/monkeypatch.data.batch_dataset_fetcher.html ...]
diff --git
a/docs/api/monkeypatch.gradient_checkpointing.offload_cpu.html b/docs/api/monkeypatch.gradient_checkpointing.offload_cpu.html index 7d72ab336..35a9b2c70 100644 --- a/docs/api/monkeypatch.gradient_checkpointing.offload_cpu.html +++ b/docs/api/monkeypatch.gradient_checkpointing.offload_cpu.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + @@ -472,7 +478,6 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});
• Classes
•
@@ -502,10 +507,6 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});
 CPU_Offloaded_Gradient_Checkpointer
 Saves VRAM by smartly offloading to RAM.
-
-CheckpointFunctionWithCPUOffload
-This is a torch/utils/checkpoint.py CheckpointFunction monkey patch that offloads the first tensor to cpu during forward and back to cuda during backward. This allows significant memory savings when using a very long seqlen. e.g. for llama 8b at 100k it’s 24GB saved per gpu: ((100_000*4096)*2*32/2**30)
-
@@ -514,13 +515,6 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});
 )

 Saves VRAM by smartly offloading to RAM. Tiny hit to performance, since we mask the movement via non blocking calls.

-
-
-
-CheckpointFunctionWithCPUOffload
-
-monkeypatch.gradient_checkpointing.offload_cpu.CheckpointFunctionWithCPUOffload(
-)
-
-This is a torch/utils/checkpoint.py CheckpointFunction monkey patch that offloads the first tensor to cpu during forward and back to cuda during backward. This allows significant memory savings when using a very long seqlen. e.g. for llama 8b at 100k it’s 24GB saved per gpu: ((100_000*4096)*2*32/2**30)
-In the case of a very long seqlen 100k+ the copying to/from cpu overhead is not big, because dense quadratic attention compute will dominate.
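The quoted arithmetic checks out: 100,000 tokens x 4096 hidden size x 2 bytes (bf16) x 32 layers / 2^30 is roughly 24.4GB of checkpointed activations per GPU. The pattern this removed docstring describes can be sketched as a custom autograd Function; the following is a minimal illustration of that pattern, not axolotl's actual implementation, and it assumes a layer that takes and returns a single tensor:

    import torch

    class CpuOffloadCheckpoint(torch.autograd.Function):
        # Sketch of CPU-offloaded gradient checkpointing: park the layer input
        # on the host during forward, recompute the layer on the GPU during
        # backward.

        @staticmethod
        def forward(ctx, run_fn, hidden_states):
            ctx.run_fn = run_fn
            # Device-to-host copy. Real implementations use pinned host
            # buffers so this non-blocking copy overlaps the forward compute.
            ctx.hidden_cpu = hidden_states.to("cpu", non_blocking=True)
            with torch.no_grad():
                return run_fn(hidden_states)

        @staticmethod
        def backward(ctx, grad_output):
            # Bring the saved activation back and recompute the forward pass
            # with autograd enabled to obtain the input gradient.
            hidden = ctx.hidden_cpu.to(grad_output.device, non_blocking=True).detach()
            hidden.requires_grad_(True)
            with torch.enable_grad():
                output = ctx.run_fn(hidden)
            torch.autograd.backward(output, grad_output)
            return None, hidden.grad  # no gradient for the callable itself

Applied per decoder layer, e.g. out = CpuOffloadCheckpoint.apply(layer_forward, hidden_states), the host round trip stays cheap relative to the quadratic attention compute at 100k+ sequence lengths, which is the point the removed docstring makes.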

[... the same six-line sidebar hunk repeats for each page from docs/api/monkeypatch.gradient_checkpointing.offload_disk.html through docs/api/prompt_tokenizers.html ...]
diff --git a/docs/api/train.html b/docs/api/train.html index e7fbbd0d3..9101940f8 100644
---
a/docs/api/train.html
+++ b/docs/api/train.html
@@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + +
[... the same six-line sidebar hunk repeats for docs/api/utils.bench.html, utils.callbacks.comet_.html, utils.callbacks.lisa.html, utils.callbacks.mlflow_.html, and utils.callbacks.perplexity.html ...]
diff --git a/docs/api/utils.callbacks.profiler.html b/docs/api/utils.callbacks.profiler.html index bf3ed58e7..81e160f79 100644
--- a/docs/api/utils.callbacks.profiler.html
+++ b/docs/api/utils.callbacks.profiler.html
@@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + +
@@ -505,7 +511,10 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

    PytorchProfilerCallback

-utils.callbacks.profiler.PytorchProfilerCallback(steps_to_profile=5)
+utils.callbacks.profiler.PytorchProfilerCallback(
+    steps_to_profile=5,
+    profiler_steps_start=0,
+)

    PyTorch Profiler callback to create snapshots of GPU memory usage at specified steps.
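A hedged sketch of what a callback with this signature could do, using PyTorch's CUDA memory-snapshot API; this illustrates the documented parameters and is not the module's verbatim code:

    import torch
    from transformers import TrainerCallback

    class ProfilerCallbackSketch(TrainerCallback):
        # Record a CUDA memory history over the step window
        # [profiler_steps_start, profiler_steps_start + steps_to_profile) and
        # dump a snapshot viewable at https://pytorch.org/memory_viz.

        def __init__(self, steps_to_profile: int = 5, profiler_steps_start: int = 0):
            self.steps_to_profile = steps_to_profile
            self.profiler_steps_start = profiler_steps_start

        def on_step_begin(self, args, state, control, **kwargs):
            if state.global_step == self.profiler_steps_start:
                torch.cuda.memory._record_memory_history()

        def on_step_end(self, args, state, control, **kwargs):
            if state.global_step == self.profiler_steps_start + self.steps_to_profile:
                torch.cuda.memory._dump_snapshot("memory_snapshot.pickle")
                torch.cuda.memory._record_memory_history(enabled=None)  # stop recording

The new profiler_steps_start parameter shifts the capture window, presumably so the snapshot can skip warmup steps instead of always starting at step zero.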

[... the same six-line sidebar hunk repeats for each page from docs/api/utils.callbacks.qat.html through docs/api/utils.schemas.model.html ...]
diff --git a/docs/api/utils.schemas.multimodal.html b/docs/api/utils.schemas.multimodal.html index b46ae8018..43fd3f615 100644
---
a/docs/api/utils.schemas.multimodal.html
+++ b/docs/api/utils.schemas.multimodal.html
@@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + +
[... the same six-line sidebar hunk repeats for the remaining docs/api/utils.* pages, docs/batch_vs_grad.html, and docs/cli.html ...]
diff --git a/docs/config-reference.html b/docs/config-reference.html index 18efb8ab3..bfa419e0e 100644
--- a/docs/config-reference.html
+++ b/docs/config-reference.html
@@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + +
@@ -1148,585 +1154,591 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});
 # Number of high-loss steps in a row before the trainer aborts (default: 3)
 loss_watchdog_patience: int | None
-gc_steps: int | None
-
-# Use CUDA bf16. bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection.
-# require >=ampere
-bf16: Literal['auto'] | bool | None = auto
-# Use CUDA fp16
-fp16: bool | None
-fp8: bool | None
-# No AMP (automatic mixed precision) - require >=ampere
-bfloat16: bool | None
-# No AMP (automatic mixed precision)
-float16: bool | None
-# Use CUDA tf32 - require >=ampere
-tf32: bool | None
-float32: bool | None
-
-# Whether to use gradient checkpointing. Available options are: true, false, 'offload',
-# 'offload_disk'.
-# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing
-gradient_checkpointing: Literal['offload', 'offload_disk'] | bool | None = False
-# Additional kwargs to pass to the trainer for gradient checkpointing
-gradient_checkpointing_kwargs: dict[str, Any] | None
-
-unfrozen_parameters: list[str] | None
-
-# The maximum length of an input to train with, this should typically be less than 2048
-# as most models have a token/context limit of 2048
-sequence_len: int = 512
-# The maximum length of an input for evaluation. If not specified, defaults to
-# sequence_len
-eval_sequence_len: int | None
-min_sample_len: int | None
-# maximum prompt length for RL training
-max_prompt_len: int = 512
-# Use efficient multi-packing with block diagonal attention and per sequence
-# position_ids. Recommend set to 'true'
-sample_packing: bool | None
-# The number of samples packed at a time. Increasing the following values helps with
-# packing, but usually only slightly (<%1.)
-sample_packing_group_size: int | None = 100000
-# The number of samples which can be packed into one sequence. Increase if using a large
-# sequence_len with many short samples.
-sample_packing_bin_size: int | None = 200
-# Whether to pack samples sequentially
-sample_packing_sequentially: bool | None
-# The multiprocessing start method to use for packing. Should be 'fork', 'spawn' or
-# 'forkserver'
-sample_packing_mp_start_method: str | None
-# Set to 'false' if getting errors during eval with sample_packing on
-eval_sample_packing: bool | None
-# Pad inputs so each step uses constant sized buffers. This will reduce memory
-# fragmentation and may prevent OOMs, by re-using memory more efficiently
-pad_to_sequence_len: bool | None
-# Whether to use sequential sampling for curriculum learning
-curriculum_sampling: bool | None
-multipack_real_batches: bool | None
-# whether to concatenate samples during pretraining
-pretraining_sample_concatenation: bool | None
-
-# Use batch flattening for speedups when not using sample_packing
-batch_flattening: Literal['auto'] | bool | None
-
-use_pose: bool | None
-pose_split_on_token_ids: list[int] | None
-pose_max_context_len: int | None
-pose_num_chunks: int | None
-
-pretrain_multipack_buffer_size: int | None = 10000
-# whether to prevent cross attention for packed sequences during pretraining
-pretrain_multipack_attn: bool | None = True
+# Run garbage collection every `gc_steps` steps. -1 will run on epoch end and before
+# evaluations. Default is 0 (disabled).
+gc_steps: int | None
+
+# Use CUDA bf16. bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection.
+# require >=ampere
+bf16: Literal['auto'] | bool | None = auto
+# Use CUDA fp16
+fp16: bool | None
+fp8: bool | None
+# No AMP (automatic mixed precision) - require >=ampere
+bfloat16: bool | None
+# No AMP (automatic mixed precision)
+float16: bool | None
+# Use CUDA tf32 - require >=ampere
+tf32: bool | None
+float32: bool | None
+
+# Whether to use gradient checkpointing. Available options are: true, false, 'offload',
+# 'offload_disk'.
+# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing
+gradient_checkpointing: Literal['offload', 'offload_disk'] | bool | None = False
+# Additional kwargs to pass to the trainer for gradient checkpointing
+gradient_checkpointing_kwargs: dict[str, Any] | None
+# Whether to offload activations. Available options are: true, false, 'legacy', 'disk'.
+activation_offloading: Literal['legacy', 'disk'] | bool | None = False
+
+unfrozen_parameters: list[str] | None
+
+# The maximum length of an input to train with, this should typically be less than 2048
+# as most models have a token/context limit of 2048
+sequence_len: int = 512
+# The maximum length of an input for evaluation. If not specified, defaults to
+# sequence_len
+eval_sequence_len: int | None
+min_sample_len: int | None
+# maximum prompt length for RL training
+max_prompt_len: int = 512
+# Use efficient multi-packing with block diagonal attention and per sequence
+# position_ids. Recommend set to 'true'
+sample_packing: bool | None
+# The number of samples packed at a time. Increasing the following values helps with
+# packing, but usually only slightly (<%1.)
+sample_packing_group_size: int | None = 100000
+# The number of samples which can be packed into one sequence. Increase if using a large
+# sequence_len with many short samples.
+sample_packing_bin_size: int | None = 200
+# Whether to pack samples sequentially
+sample_packing_sequentially: bool | None
+# The multiprocessing start method to use for packing. Should be 'fork', 'spawn' or
+# 'forkserver'
+sample_packing_mp_start_method: str | None
+# Set to 'false' if getting errors during eval with sample_packing on
+eval_sample_packing: bool | None
+# Pad inputs so each step uses constant sized buffers. This will reduce memory
+# fragmentation and may prevent OOMs, by re-using memory more efficiently
+pad_to_sequence_len: bool | None
+# Whether to use sequential sampling for curriculum learning
+curriculum_sampling: bool | None
+multipack_real_batches: bool | None
+# whether to concatenate samples during pretraining
+pretraining_sample_concatenation: bool | None
+
+# Use batch flattening for speedups when not using sample_packing
+batch_flattening: Literal['auto'] | bool | None
+
+use_pose: bool | None
+pose_split_on_token_ids: list[int] | None
+pose_max_context_len: int | None
+pose_num_chunks: int | None
-
-# Whether to use xformers attention patch https://github.com/facebookresearch/xformers
-xformers_attention: bool | None
-# Whether to use scaled-dot-product attention https://pytorch.org/docs/stable/generated/
-# torch.nn.functional.scaled_dot_product_attention.html
-sdp_attention: bool | None
-# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf
-s2_attention: bool | None
-flex_attention: bool | None
-flex_attn_compile_kwargs: dict[str, Any] | None
-# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention
-flash_attention: bool | None
-# Whether to use flash-attention cross entropy implementation - advanced use only
-flash_attn_cross_entropy: bool | None
-# Whether to use flash-attention rms norm implementation - advanced use only
-flash_attn_rms_norm: bool | None
-# Whether to fuse QKV into a single operation
-flash_attn_fuse_qkv: bool | None
-# Whether to fuse part of the MLP into a single operation
-flash_attn_fuse_mlp: bool | None
-# Whether to use bettertransformers
-flash_optimum: bool | None
-
-eager_attention: bool | None
-
-unsloth_cross_entropy_loss: bool | None
-unsloth_lora_mlp: bool | None
-unsloth_lora_qkv: bool | None
-unsloth_lora_o: bool | None
-unsloth_rms_norm: bool | None
-unsloth_rope: bool | None
-
-# Apply custom LoRA autograd functions and activation function Triton kernels for speed
-# and memory savings.
See: https://docs.axolotl.ai/docs/lora_optims.html -lora_mlp_kernel: bool | None -# Apply custom LoRA autograd functions and activation function Triton kernels for speed -# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html -lora_qkv_kernel: bool | None -# Apply custom LoRA autograd functions and activation function Triton kernels for speed -# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html -lora_o_kernel: bool | None - -# Whether to use chunked cross entropy loss for memory efficiency -chunked_cross_entropy: bool | None -# Number of chunks to use for chunked cross entropy loss -chunked_cross_entropy_num_chunks: int | None - -# Whether to use ALST tiled mlp for memory efficient long context -tiled_mlp: bool | None - -# Number of shards to use for ALST tiled mlp. If unset, it will be set based on -# seqlen/hidden_size -tiled_mlp_num_shards: int | None +pretrain_multipack_buffer_size: int | None = 10000 +# whether to prevent cross attention for packed sequences during pretraining +pretrain_multipack_attn: bool | None = True + +# Whether to use xformers attention patch https://github.com/facebookresearch/xformers +xformers_attention: bool | None +# Whether to use scaled-dot-product attention https://pytorch.org/docs/stable/generated/ +# torch.nn.functional.scaled_dot_product_attention.html +sdp_attention: bool | None +# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf +s2_attention: bool | None +flex_attention: bool | None +flex_attn_compile_kwargs: dict[str, Any] | None +# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention +flash_attention: bool | None +# Whether to use flash-attention cross entropy implementation - advanced use only +flash_attn_cross_entropy: bool | None +# Whether to use flash-attention rms norm implementation - advanced use only +flash_attn_rms_norm: bool | None +# Whether to fuse QKV into a single operation +flash_attn_fuse_qkv: bool | None +# Whether to fuse part of the MLP into a single operation +flash_attn_fuse_mlp: bool | None +# Whether to use bettertransformers +flash_optimum: bool | None + +eager_attention: bool | None + +unsloth_cross_entropy_loss: bool | None +unsloth_lora_mlp: bool | None +unsloth_lora_qkv: bool | None +unsloth_lora_o: bool | None +unsloth_rms_norm: bool | None +unsloth_rope: bool | None + +# Apply custom LoRA autograd functions and activation function Triton kernels for speed +# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html +lora_mlp_kernel: bool | None +# Apply custom LoRA autograd functions and activation function Triton kernels for speed +# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html +lora_qkv_kernel: bool | None +# Apply custom LoRA autograd functions and activation function Triton kernels for speed +# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html +lora_o_kernel: bool | None + +# Whether to use chunked cross entropy loss for memory efficiency +chunked_cross_entropy: bool | None +# Number of chunks to use for chunked cross entropy loss +chunked_cross_entropy_num_chunks: int | None + +# Whether to use ALST tiled mlp for memory efficient long context +tiled_mlp: bool | None -llama4_linearized_experts: bool | None - -# Deepspeed config path. 
e.g., deepspeed_configs/zero3.json -deepspeed: str | dict[str, Any] | None -# FSDP configuration -fsdp: list[str] | None - -# FSDP configuration options -fsdp_config: dict[str, Any] | None -# FSDP version -fsdp_version: int | None -fsdp_final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None - -# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for -# no eval. -val_set_size: float | None = 0.0 +# Number of shards to use for ALST tiled mlp. If unset, it will be set based on +# seqlen/hidden_size +tiled_mlp_num_shards: int | None + +llama4_linearized_experts: bool | None + +# Deepspeed config path. e.g., deepspeed_configs/zero3.json +deepspeed: str | dict[str, Any] | None +# FSDP configuration +fsdp: list[str] | None + +# FSDP configuration options +fsdp_config: dict[str, Any] | None +# FSDP version +fsdp_version: int | None +fsdp_final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None -# Set to a divisor of the number of GPUs available to split sequences into chunks of -# equal size. Use in long context training to prevent OOM when sequences cannot fit into -# a single GPU's VRAM. E.g., if 4 GPUs are available, set this value to 2 to split each -# sequence into two equal-sized subsequences, or set to 4 to split into four equal-sized -# subsequences. See https://docs.axolotl.ai/docs/sequence_parallelism.html for more -# details. -sequence_parallel_degree: int | None -# Optional; strides across the key dimension. Larger values use more memory but should -# make training faster. Must evenly divide the number of KV heads in your model. -heads_k_stride: int | None -# One of 'varlen_llama3', 'batch_ring', 'batch_zigzag', 'batch_stripe'. Defaults to -# 'varlen_llama3' in the sample packing case, and 'batch_ring' in the non-sample packing -# case. -ring_attn_func: RingAttnFunc | None - -# Add or change special tokens. If you add tokens here, you don't need to add them to -# the `tokens` list. -special_tokens: SpecialTokensConfig | None - # For SpecialTokensConfig: - bos_token: str | None - eos_token: str | None - pad_token: str | None - unk_token: str | None - additional_special_tokens: list[str] | None - -# Add extra tokens to the tokenizer -tokens: list[str] | None -# Mapping token_id to new_token_string to override reserved added_tokens in the -# tokenizer. Only works for tokens that are not part of the base vocab (aka are -# added_tokens). Can be checked if they exist in tokenizer.json added_tokens. -added_tokens_overrides: dict[int, str] | None - -# Whether to use torch.compile and which backend to use. setting to `auto` will enable -# torch compile when torch>=2.6.0 -torch_compile: Literal['auto'] | bool | None -# Backend to use for torch.compile -torch_compile_backend: str | None -torch_compile_mode: Literal['default', 'reduce-overhead', 'max-autotune'] | None - -# Maximum number of iterations to train for. It precedes num_epochs which means that if -# both are set, num_epochs will not be guaranteed. e.g., when 1 epoch is 1000 steps => -# `num_epochs: 2` and `max_steps: 100` will train for 100 steps -max_steps: int | None -# Number of warmup steps. Cannot use with warmup_ratio -warmup_steps: int | None -# Warmup ratio. Cannot use with warmup_steps -warmup_ratio: float | None -# Leave empty to eval at each epoch, integer for every N steps. 
float for fraction of -# total steps -eval_steps: int | float | None -# Number of times per epoch to run evals, mutually exclusive with eval_steps -evals_per_epoch: int | None -# Set to `no` to skip evaluation, `epoch` at end of each epoch, leave empty to infer -# from `eval_steps` -eval_strategy: str | None -# Leave empty to save at each epoch, integer for every N steps. float for fraction of -# total steps -save_steps: int | float | None -# Number of times per epoch to save a checkpoint, mutually exclusive with save_steps -saves_per_epoch: int | None -# Set to `no` to skip checkpoint saves, `epoch` at end of each epoch, `best` when better -# result is achieved, leave empty to infer from `save_steps` -save_strategy: str | None -# Checkpoints saved at a time -save_total_limit: int | None -# Logging frequency -logging_steps: int | None -# Stop training after this many evaluation losses have increased in a row. https://huggi -# ngface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppin -# gCallback -early_stopping_patience: int | None -load_best_model_at_end: bool | None = False -# Save only the model weights, skipping the optimizer. Using this means you can't resume -# from checkpoints. -save_only_model: bool | None = False -# Use tensorboard for logging -use_tensorboard: bool | None -# Enable the pytorch profiler to capture the first N steps of training to the -# output_dir. see https://pytorch.org/blog/understanding-gpu-memory-1/ for more -# information. Snapshots can be visualized @ https://pytorch.org/memory_viz -profiler_steps: int | None -# bool of whether to include tokens trainer per second in the training metrics. This -# iterates over the entire dataset once, so it takes some time. -include_tokens_per_second: bool | None - -# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to -# add noise to embeddings. Currently only supported on Llama and Mistral -neftune_noise_alpha: float | None - -# Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to -# `beta` in `ORPOConfig` due to trl mapping. -orpo_alpha: float | None -# Weighting of NLL term in loss from RPO paper -rpo_alpha: float | None -# Target reward margin for the SimPO loss -simpo_gamma: float | None -# Weight of the BC regularizer -cpo_alpha: float | None - -# Factor for desirable loss term in KTO loss -kto_desirable_weight: float | None -# Factor for undesirable loss term in KTO loss -kto_undesirable_weight: float | None -# The beta parameter for the RL training -rl_beta: float | None - -# Defines the max memory usage per gpu on the system. Passed through to transformers -# when loading the model. -max_memory: dict[int | Literal['cpu', 'disk'], int | str] | None -# Limit the memory for all available GPUs to this amount (if an integer, expressed in -# gigabytes); default: unset -gpu_memory_limit: int | str | None -# Whether to use low_cpu_mem_usage -low_cpu_mem_usage: bool | None - -# The name of the chat template to use for training, following values are supported: -# tokenizer_default: Uses the chat template that is available in the -# tokenizer_config.json. If the chat template is not available in the tokenizer, it will -# raise an error. This is the default value. -# alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates -# are available in the axolotl codebase at src/axolotl/utils/chat_templates.py. -# tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. -# E.g. 
tokenizer_default_fallback_chatml. This is useful when the chat template is not -# available in the tokenizer. jinja: Uses a custom jinja template for the chat template. -# The custom jinja template should be provided in the chat_template_jinja field. The -# selected chat template will be saved to the tokenizer_config.json for easier -# inferencing -chat_template: ChatTemplate | Annotated[str, StringConstraints(pattern='^tokenizer_default_fallback_')] | None -# Custom jinja template or path to jinja file for chat template. This will be only used -# if chat_template is set to `jinja` or `null` (in which case chat_template is -# automatically set to `jinja`). Default is null. -chat_template_jinja: str | None -# Additional kwargs to pass to the chat template. This is useful for customizing the -# chat template. For example, you can pass `thinking=False` to add a generation prompt -# to the chat template. -chat_template_kwargs: dict[str, Any] | None -# Custom EOT (End-of-Turn) tokens to mask/unmask during training. These tokens mark the -# boundaries between conversation turns. For example: ['/INST', '</s>', -# '[/SYSTEM_PROMPT]']. If not specified, defaults to just the model's eos_token. This is -# useful for templates that use multiple delimiter tokens. -eot_tokens: list[str] | None -# Changes the default system message. Currently only supports chatml. -default_system_message: str | None - -fix_untrained_tokens: int | list[int] | None - -is_preprocess: bool | None -preprocess_iterable: bool | None - -# Total number of tokens - internal use -total_num_tokens: int | None -total_supervised_tokens: int | None -# You can set these packing optimizations AFTER starting a training at least once. The -# trainer will provide recommended values for these values. -sample_packing_eff_est: float | None -axolotl_config_path: str | None - -# Internal use only - Used to identify which the model is based on -is_falcon_derived_model: bool | None -# Internal use only - Used to identify which the model is based on -is_llama_derived_model: bool | None -# Internal use only - Used to identify which the model is based on. Please note that if -# you set this to true, `padding_side` will be set to 'left' by default -is_mistral_derived_model: bool | None -# Internal use only - Used to identify which the model is based on -is_qwen_derived_model: bool | None - -# Add plugins to extend the pipeline. See `src/axolotl/integrations` for the available -# plugins or doc below for more details. -# https://docs.axolotl.ai/docs/custom_integrations.html -plugins: list[str] | None - -# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files. This -# can also be a relative path to a model on disk -base_model: str (required) -# If the base_model repo on hf hub doesn't include configuration .json files, You can -# set that here, or leave this empty to default to base_model -base_model_config: str | None -cls_model_config: str | None -# Optional tokenizer configuration path in case you want to use a different tokenizer -# than the one defined in the base model -tokenizer_config: str | None -# use_fast option for tokenizer loading from_pretrained, default to True -tokenizer_use_fast: bool | None -# Whether to use the legacy tokenizer setting, defaults to True -tokenizer_legacy: bool | None -# Whether to use mistral-common tokenizer. If set to True, it will use the mistral- -# common tokenizer. 
-tokenizer_use_mistral_common: bool | None -# Corresponding tokenizer for the model AutoTokenizer is a good choice -tokenizer_type: str | None -# transformers processor class -processor_type: str | None -# Trust remote code for untrusted source -trust_remote_code: bool | None - -# Where to save the full-finetuned model to -output_dir: str = ./model-out -# push checkpoints to hub -hub_model_id: str | None -# how to push checkpoints to hub -hub_strategy: str | None -# Save model as safetensors (require safetensors package). Default True -save_safetensors: bool | None = True - -# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer -load_in_8bit: bool | None = False -# Use bitsandbytes 4 bit -load_in_4bit: bool | None = False - -# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in -# original model -adapter: str | None -# If you already have a lora model trained that you want to load, put that here. This -# means after training, if you want to test the model, you should set this to the value -# of `output_dir`. Note that if you merge an adapter to the base model, a new -# subdirectory `merged` will be created under the `output_dir`. -lora_model_dir: str | None -lora_r: int | None -lora_alpha: int | None -lora_fan_in_fan_out: bool | None -lora_target_modules: str | list[str] | None -# If true, will target all linear modules -lora_target_linear: bool | None -# If you added new tokens to the tokenizer, you may need to save some LoRA modules -# because they need to know the new tokens. For LLaMA and Mistral, you need to save -# `embed_tokens` and `lm_head`. It may vary for other models. `embed_tokens` converts -# tokens to embeddings, and `lm_head` converts embeddings to token probabilities. -lora_modules_to_save: list[str] | None -lora_dropout: float | None = 0.0 -# The layer indices to transform, otherwise, apply to all layers -peft_layers_to_transform: list[int] | None -peft_layers_pattern: list[str] | None - -peft: PeftConfig | None - # For PeftConfig: - # Configuration options for loftq initialization for LoRA - loftq_config: LoftQConfig | None - # For LoftQConfig: - # typically 4 bits - loftq_bits: int = 4 - -# Whether to use DoRA. -peft_use_dora: bool | None -# Whether to use RSLoRA. -peft_use_rslora: bool | None -# List of layer indices to replicate. -peft_layer_replication: list[tuple[int, int]] | None -# How to initialize LoRA weights. Default to True which is MS original implementation. -peft_init_lora_weights: bool | str | None - -# load qlora model in sharded format for FSDP using answer.ai technique. -qlora_sharded_model_loading: bool | None = False -# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it -# takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge -lora_on_cpu: bool | None -# Whether you are training a 4-bit GPTQ quantized model -gptq: bool | None -# optional overrides to the bnb 4bit quantization configuration -bnb_config_kwargs: dict[str, Any] | None - -# loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4. -loraplus_lr_ratio: float | None -# loraplus learning rate for lora embedding layers. Default value is 1e-6. 
-loraplus_lr_embedding: float | None = 1e-06 - -merge_lora: bool | None - -# Number of steps per ReLoRA restart -relora_steps: int | None -# Number of per-restart warmup steps -relora_warmup_steps: int | None -# Number of anneal steps for each relora cycle -relora_anneal_steps: int | None -# threshold for optimizer magnitude when pruning -relora_prune_ratio: float | None -# True to perform lora weight merges on cpu during restarts, for modest gpu memory -# savings -relora_cpu_offload: bool | None - -# If greater than 1, backpropagation will be skipped and the gradients will be -# accumulated for the given number of steps. -gradient_accumulation_steps: int | None = 1 -# The number of samples to include in each batch. This is the number of samples sent to -# each GPU. Batch size per gpu = micro_batch_size * gradient_accumulation_steps -micro_batch_size: int | None = 1 -# Total batch size, we do not recommended setting this manually -batch_size: int | None -# per gpu micro batch size for evals, defaults to value of micro_batch_size -eval_batch_size: int | None - -# whether to find batch size that fits in memory. Passed to underlying transformers -# Trainer -auto_find_batch_size: bool | None - -# Whether to mask out or include the human's prompt from the training labels -train_on_inputs: bool | None = False -# Group similarly sized data to minimize padding. May be slower to start, as it must -# download and sort the entire dataset. Note that training loss may have an oscillating -# pattern with this enabled. -group_by_length: bool | None - -learning_rate: str | float (required) -embedding_lr: float | None -embedding_lr_scale: float | None -# Specify weight decay -weight_decay: float | None = 0.0 -# Specify optimizer -optimizer: OptimizerNames | CustomSupportedOptimizers | None = OptimizerNames.ADAMW_TORCH_FUSED -# Dictionary of arguments to pass to the optimizer -optim_args: str | dict[str, Any] | None -# The target modules to optimize, i.e. the module names that you would like to train, -# right now this is used only for GaLore algorithm -optim_target_modules: list[str] | Literal['all_linear'] | None -# Path to torch distx for optim 'adamw_anyprecision' -torchdistx_path: str | None -lr_scheduler: SchedulerType | Literal['one_cycle'] | Literal['rex'] | None = SchedulerType.COSINE -# Specify a scheduler and kwargs to use with the optimizer -lr_scheduler_kwargs: dict[str, Any] | None -lr_quadratic_warmup: bool | None -# decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of -# peak lr -cosine_min_lr_ratio: float | None -# freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means -# start cosine_min_lr at 80% of training step -cosine_constant_lr_ratio: float | None -# Learning rate div factor -lr_div_factor: float | None - -lr_groups: list[LrGroup] | None - # For LrGroup: - name: str (required) - modules: list[str] (required) - lr: float (required) +# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for +# no eval. +val_set_size: float | None = 0.0 + +# Set to a divisor of the number of GPUs available to split sequences into chunks of +# equal size. Use in long context training to prevent OOM when sequences cannot fit into +# a single GPU's VRAM. E.g., if 4 GPUs are available, set this value to 2 to split each +# sequence into two equal-sized subsequences, or set to 4 to split into four equal-sized +# subsequences. See https://docs.axolotl.ai/docs/sequence_parallelism.html for more +# details. 
+sequence_parallel_degree: int | None +# Optional; strides across the key dimension. Larger values use more memory but should +# make training faster. Must evenly divide the number of KV heads in your model. +heads_k_stride: int | None +# One of 'varlen_llama3', 'batch_ring', 'batch_zigzag', 'batch_stripe'. Defaults to +# 'varlen_llama3' in the sample packing case, and 'batch_ring' in the non-sample packing +# case. +ring_attn_func: RingAttnFunc | None + +# Add or change special tokens. If you add tokens here, you don't need to add them to +# the `tokens` list. +special_tokens: SpecialTokensConfig | None + # For SpecialTokensConfig: + bos_token: str | None + eos_token: str | None + pad_token: str | None + unk_token: str | None + additional_special_tokens: list[str] | None + +# Add extra tokens to the tokenizer +tokens: list[str] | None +# Mapping token_id to new_token_string to override reserved added_tokens in the +# tokenizer. Only works for tokens that are not part of the base vocab (aka are +# added_tokens). Can be checked if they exist in tokenizer.json added_tokens. +added_tokens_overrides: dict[int, str] | None + +# Whether to use torch.compile and which backend to use. setting to `auto` will enable +# torch compile when torch>=2.6.0 +torch_compile: Literal['auto'] | bool | None +# Backend to use for torch.compile +torch_compile_backend: str | None +torch_compile_mode: Literal['default', 'reduce-overhead', 'max-autotune'] | None + +# Maximum number of iterations to train for. It precedes num_epochs which means that if +# both are set, num_epochs will not be guaranteed. e.g., when 1 epoch is 1000 steps => +# `num_epochs: 2` and `max_steps: 100` will train for 100 steps +max_steps: int | None +# Number of warmup steps. Cannot use with warmup_ratio +warmup_steps: int | None +# Warmup ratio. Cannot use with warmup_steps +warmup_ratio: float | None +# Leave empty to eval at each epoch, integer for every N steps. float for fraction of +# total steps +eval_steps: int | float | None +# Number of times per epoch to run evals, mutually exclusive with eval_steps +evals_per_epoch: int | None +# Set to `no` to skip evaluation, `epoch` at end of each epoch, leave empty to infer +# from `eval_steps` +eval_strategy: str | None +# Leave empty to save at each epoch, integer for every N steps. float for fraction of +# total steps +save_steps: int | float | None +# Number of times per epoch to save a checkpoint, mutually exclusive with save_steps +saves_per_epoch: int | None +# Set to `no` to skip checkpoint saves, `epoch` at end of each epoch, `best` when better +# result is achieved, leave empty to infer from `save_steps` +save_strategy: str | None +# Checkpoints saved at a time +save_total_limit: int | None +# Logging frequency +logging_steps: int | None +# Stop training after this many evaluation losses have increased in a row. https://huggi +# ngface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppin +# gCallback +early_stopping_patience: int | None +load_best_model_at_end: bool | None = False +# Save only the model weights, skipping the optimizer. Using this means you can't resume +# from checkpoints. +save_only_model: bool | None = False +# Use tensorboard for logging +use_tensorboard: bool | None +# Enable the pytorch profiler to capture the first N steps of training to the +# output_dir. see https://pytorch.org/blog/understanding-gpu-memory-1/ for more +# information. 
Snapshots can be visualized @ https://pytorch.org/memory_viz +profiler_steps: int | None +# Which step to start the profiler at. Useful for only capturing a few steps mid-run. +profiler_steps_start: int | None = 0 +# bool of whether to include tokens trainer per second in the training metrics. This +# iterates over the entire dataset once, so it takes some time. +include_tokens_per_second: bool | None + +# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to +# add noise to embeddings. Currently only supported on Llama and Mistral +neftune_noise_alpha: float | None + +# Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to +# `beta` in `ORPOConfig` due to trl mapping. +orpo_alpha: float | None +# Weighting of NLL term in loss from RPO paper +rpo_alpha: float | None +# Target reward margin for the SimPO loss +simpo_gamma: float | None +# Weight of the BC regularizer +cpo_alpha: float | None + +# Factor for desirable loss term in KTO loss +kto_desirable_weight: float | None +# Factor for undesirable loss term in KTO loss +kto_undesirable_weight: float | None +# The beta parameter for the RL training +rl_beta: float | None + +# Defines the max memory usage per gpu on the system. Passed through to transformers +# when loading the model. +max_memory: dict[int | Literal['cpu', 'disk'], int | str] | None +# Limit the memory for all available GPUs to this amount (if an integer, expressed in +# gigabytes); default: unset +gpu_memory_limit: int | str | None +# Whether to use low_cpu_mem_usage +low_cpu_mem_usage: bool | None + +# The name of the chat template to use for training, following values are supported: +# tokenizer_default: Uses the chat template that is available in the +# tokenizer_config.json. If the chat template is not available in the tokenizer, it will +# raise an error. This is the default value. +# alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates +# are available in the axolotl codebase at src/axolotl/utils/chat_templates.py. +# tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. +# E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not +# available in the tokenizer. jinja: Uses a custom jinja template for the chat template. +# The custom jinja template should be provided in the chat_template_jinja field. The +# selected chat template will be saved to the tokenizer_config.json for easier +# inferencing +chat_template: ChatTemplate | Annotated[str, StringConstraints(pattern='^tokenizer_default_fallback_')] | None +# Custom jinja template or path to jinja file for chat template. This will be only used +# if chat_template is set to `jinja` or `null` (in which case chat_template is +# automatically set to `jinja`). Default is null. +chat_template_jinja: str | None +# Additional kwargs to pass to the chat template. This is useful for customizing the +# chat template. For example, you can pass `thinking=False` to add a generation prompt +# to the chat template. +chat_template_kwargs: dict[str, Any] | None +# Custom EOT (End-of-Turn) tokens to mask/unmask during training. These tokens mark the +# boundaries between conversation turns. For example: ['/INST', '</s>', +# '[/SYSTEM_PROMPT]']. If not specified, defaults to just the model's eos_token. This is +# useful for templates that use multiple delimiter tokens. +eot_tokens: list[str] | None +# Changes the default system message. Currently only supports chatml. 
+default_system_message: str | None + +fix_untrained_tokens: int | list[int] | None + +is_preprocess: bool | None +preprocess_iterable: bool | None + +# Total number of tokens - internal use +total_num_tokens: int | None +total_supervised_tokens: int | None +# You can set these packing optimizations AFTER starting a training at least once. The +# trainer will provide recommended values for these values. +sample_packing_eff_est: float | None +axolotl_config_path: str | None + +# Internal use only - Used to identify which the model is based on +is_falcon_derived_model: bool | None +# Internal use only - Used to identify which the model is based on +is_llama_derived_model: bool | None +# Internal use only - Used to identify which the model is based on. Please note that if +# you set this to true, `padding_side` will be set to 'left' by default +is_mistral_derived_model: bool | None +# Internal use only - Used to identify which the model is based on +is_qwen_derived_model: bool | None + +# Add plugins to extend the pipeline. See `src/axolotl/integrations` for the available +# plugins or doc below for more details. +# https://docs.axolotl.ai/docs/custom_integrations.html +plugins: list[str] | None + +# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files. This +# can also be a relative path to a model on disk +base_model: str (required) +# If the base_model repo on hf hub doesn't include configuration .json files, You can +# set that here, or leave this empty to default to base_model +base_model_config: str | None +cls_model_config: str | None +# Optional tokenizer configuration path in case you want to use a different tokenizer +# than the one defined in the base model +tokenizer_config: str | None +# use_fast option for tokenizer loading from_pretrained, default to True +tokenizer_use_fast: bool | None +# Whether to use the legacy tokenizer setting, defaults to True +tokenizer_legacy: bool | None +# Whether to use mistral-common tokenizer. If set to True, it will use the mistral- +# common tokenizer. +tokenizer_use_mistral_common: bool | None +# Corresponding tokenizer for the model AutoTokenizer is a good choice +tokenizer_type: str | None +# transformers processor class +processor_type: str | None +# Trust remote code for untrusted source +trust_remote_code: bool | None + +# Where to save the full-finetuned model to +output_dir: str = ./model-out +# push checkpoints to hub +hub_model_id: str | None +# how to push checkpoints to hub +hub_strategy: str | None +# Save model as safetensors (require safetensors package). Default True +save_safetensors: bool | None = True + +# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer +load_in_8bit: bool | None = False +# Use bitsandbytes 4 bit +load_in_4bit: bool | None = False + +# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in +# original model +adapter: str | None +# If you already have a lora model trained that you want to load, put that here. This +# means after training, if you want to test the model, you should set this to the value +# of `output_dir`. Note that if you merge an adapter to the base model, a new +# subdirectory `merged` will be created under the `output_dir`. 
+lora_model_dir: str | None +lora_r: int | None +lora_alpha: int | None +lora_fan_in_fan_out: bool | None +lora_target_modules: str | list[str] | None +# If true, will target all linear modules +lora_target_linear: bool | None +# If you added new tokens to the tokenizer, you may need to save some LoRA modules +# because they need to know the new tokens. For LLaMA and Mistral, you need to save +# `embed_tokens` and `lm_head`. It may vary for other models. `embed_tokens` converts +# tokens to embeddings, and `lm_head` converts embeddings to token probabilities. +lora_modules_to_save: list[str] | None +lora_dropout: float | None = 0.0 +# The layer indices to transform, otherwise, apply to all layers +peft_layers_to_transform: list[int] | None +peft_layers_pattern: list[str] | None + +peft: PeftConfig | None + # For PeftConfig: + # Configuration options for loftq initialization for LoRA + loftq_config: LoftQConfig | None + # For LoftQConfig: + # typically 4 bits + loftq_bits: int = 4 + +# Whether to use DoRA. +peft_use_dora: bool | None +# Whether to use RSLoRA. +peft_use_rslora: bool | None +# List of layer indices to replicate. +peft_layer_replication: list[tuple[int, int]] | None +# How to initialize LoRA weights. Default to True which is MS original implementation. +peft_init_lora_weights: bool | str | None + +# load qlora model in sharded format for FSDP using answer.ai technique. +qlora_sharded_model_loading: bool | None = False +# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it +# takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge +lora_on_cpu: bool | None +# Whether you are training a 4-bit GPTQ quantized model +gptq: bool | None +# optional overrides to the bnb 4bit quantization configuration +bnb_config_kwargs: dict[str, Any] | None + +# loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4. +loraplus_lr_ratio: float | None +# loraplus learning rate for lora embedding layers. Default value is 1e-6. +loraplus_lr_embedding: float | None = 1e-06 + +merge_lora: bool | None + +# Number of steps per ReLoRA restart +relora_steps: int | None +# Number of per-restart warmup steps +relora_warmup_steps: int | None +# Number of anneal steps for each relora cycle +relora_anneal_steps: int | None +# threshold for optimizer magnitude when pruning +relora_prune_ratio: float | None +# True to perform lora weight merges on cpu during restarts, for modest gpu memory +# savings +relora_cpu_offload: bool | None + +# If greater than 1, backpropagation will be skipped and the gradients will be +# accumulated for the given number of steps. +gradient_accumulation_steps: int | None = 1 +# The number of samples to include in each batch. This is the number of samples sent to +# each GPU. Batch size per gpu = micro_batch_size * gradient_accumulation_steps +micro_batch_size: int | None = 1 +# Total batch size, we do not recommended setting this manually +batch_size: int | None +# per gpu micro batch size for evals, defaults to value of micro_batch_size +eval_batch_size: int | None + +# whether to find batch size that fits in memory. Passed to underlying transformers +# Trainer +auto_find_batch_size: bool | None + +# Whether to mask out or include the human's prompt from the training labels +train_on_inputs: bool | None = False +# Group similarly sized data to minimize padding. May be slower to start, as it must +# download and sort the entire dataset. Note that training loss may have an oscillating +# pattern with this enabled. 
+group_by_length: bool | None + +learning_rate: str | float (required) +embedding_lr: float | None +embedding_lr_scale: float | None +# Specify weight decay +weight_decay: float | None = 0.0 +# Specify optimizer +optimizer: OptimizerNames | CustomSupportedOptimizers | None = OptimizerNames.ADAMW_TORCH_FUSED +# Dictionary of arguments to pass to the optimizer +optim_args: str | dict[str, Any] | None +# The target modules to optimize, i.e. the module names that you would like to train, +# right now this is used only for GaLore algorithm +optim_target_modules: list[str] | Literal['all_linear'] | None +# Path to torch distx for optim 'adamw_anyprecision' +torchdistx_path: str | None +lr_scheduler: SchedulerType | Literal['one_cycle'] | Literal['rex'] | None = SchedulerType.COSINE +# Specify a scheduler and kwargs to use with the optimizer +lr_scheduler_kwargs: dict[str, Any] | None +lr_quadratic_warmup: bool | None +# decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of +# peak lr +cosine_min_lr_ratio: float | None +# freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means +# start cosine_min_lr at 80% of training step +cosine_constant_lr_ratio: float | None +# Learning rate div factor +lr_div_factor: float | None -# adamw hyperparams -adam_epsilon: float | None -# only used for CAME Optimizer -adam_epsilon2: float | None -# adamw hyperparams -adam_beta1: float | None +lr_groups: list[LrGroup] | None + # For LrGroup: + name: str (required) + modules: list[str] (required) + lr: float (required) + # adamw hyperparams -adam_beta2: float | None +adam_epsilon: float | None # only used for CAME Optimizer -adam_beta3: float | None -# Gradient clipping max norm -max_grad_norm: float | None -num_epochs: float = 1.0 - -use_wandb: bool | None -# Set the name of your wandb run -wandb_name: str | None -# Set the ID of your wandb run -wandb_run_id: str | None -# "offline" to save run metadata locally and not sync to the server, "disabled" to turn -# off wandb -wandb_mode: str | None -# Your wandb project name -wandb_project: str | None -# A wandb Team name if using a Team -wandb_entity: str | None -wandb_watch: str | None -# "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only -# at the end of training -wandb_log_model: str | None - -use_mlflow: bool | None -# URI to mlflow -mlflow_tracking_uri: str | None -# Your experiment name -mlflow_experiment_name: str | None -# Your run name -mlflow_run_name: str | None -# set to true to copy each saved checkpoint on each save to mlflow artifact registry -hf_mlflow_log_artifacts: bool | None - -# Enable or disable Comet integration. -use_comet: bool | None -# API key for Comet. Recommended to set via `comet login`. -comet_api_key: str | None -# Workspace name in Comet. Defaults to the user's default workspace. -comet_workspace: str | None -# Project name in Comet. Defaults to Uncategorized. -comet_project_name: str | None -# Identifier for the experiment. Used to append data to an existing experiment or -# control the key of new experiments. Default to a random key. -comet_experiment_key: str | None -# Create a new experiment ("create") or log to an existing one ("get"). Default -# ("get_or_create") auto-selects based on configuration. -comet_mode: str | None -# Set to True to log data to Comet server, or False for offline storage. Default is -# True. -comet_online: bool | None -# Dictionary for additional configuration settings, see the doc for more details. 
-comet_experiment_config: dict[str, Any] | None - -# the number of activate layers in LISA -lisa_n_layers: int | None -# how often to switch layers in LISA -lisa_step_interval: int | None -# path under the model to access the layers -lisa_layers_attribute: str | None = model.layers - -gradio_title: str | None -gradio_share: bool | None -gradio_server_name: str | None -gradio_server_port: int | None -gradio_max_new_tokens: int | None -gradio_temperature: float | None - -use_ray: bool = False -ray_run_name: str | None -ray_num_workers: int = 1 -resources_per_worker: dict - -# The size of the image to resize to. It can be an integer (resized into padded-square -# image) or a tuple (width, height).If not provided, we will attempt to load from -# preprocessor.size, otherwise, images won't be resized. -image_size: int | tuple[int, int] | None -# The resampling algorithm to use for image resizing. Default is bilinear. Please refer -# to PIL.Image.Resampling for more details. -image_resize_algorithm: Literal['bilinear', 'bicubic', 'lanczos'] | Resampling | None - -# optional overrides to the base model configuration -overrides_of_model_config: dict[str, Any] | None -# optional overrides the base model loading from_pretrained -overrides_of_model_kwargs: dict[str, Any] | None -# If you want to specify the type of model to load, AutoModelForCausalLM is a good -# choice too -type_of_model: str | None -# You can specify to choose a specific model revision from huggingface hub -revision_of_model: str | None - -max_packed_sequence_len: int | None -rope_scaling: Any | None -noisy_embedding_alpha: float | None -dpo_beta: float | None -evaluation_strategy: str | None +adam_epsilon2: float | None +# adamw hyperparams +adam_beta1: float | None +# adamw hyperparams +adam_beta2: float | None +# only used for CAME Optimizer +adam_beta3: float | None +# Gradient clipping max norm +max_grad_norm: float | None +num_epochs: float = 1.0 + +use_wandb: bool | None +# Set the name of your wandb run +wandb_name: str | None +# Set the ID of your wandb run +wandb_run_id: str | None +# "offline" to save run metadata locally and not sync to the server, "disabled" to turn +# off wandb +wandb_mode: str | None +# Your wandb project name +wandb_project: str | None +# A wandb Team name if using a Team +wandb_entity: str | None +wandb_watch: str | None +# "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only +# at the end of training +wandb_log_model: str | None + +use_mlflow: bool | None +# URI to mlflow +mlflow_tracking_uri: str | None +# Your experiment name +mlflow_experiment_name: str | None +# Your run name +mlflow_run_name: str | None +# set to true to copy each saved checkpoint on each save to mlflow artifact registry +hf_mlflow_log_artifacts: bool | None + +# Enable or disable Comet integration. +use_comet: bool | None +# API key for Comet. Recommended to set via `comet login`. +comet_api_key: str | None +# Workspace name in Comet. Defaults to the user's default workspace. +comet_workspace: str | None +# Project name in Comet. Defaults to Uncategorized. +comet_project_name: str | None +# Identifier for the experiment. Used to append data to an existing experiment or +# control the key of new experiments. Default to a random key. +comet_experiment_key: str | None +# Create a new experiment ("create") or log to an existing one ("get"). Default +# ("get_or_create") auto-selects based on configuration. 
+comet_mode: str | None +# Set to True to log data to Comet server, or False for offline storage. Default is +# True. +comet_online: bool | None +# Dictionary for additional configuration settings, see the doc for more details. +comet_experiment_config: dict[str, Any] | None + +# the number of activate layers in LISA +lisa_n_layers: int | None +# how often to switch layers in LISA +lisa_step_interval: int | None +# path under the model to access the layers +lisa_layers_attribute: str | None = model.layers + +gradio_title: str | None +gradio_share: bool | None +gradio_server_name: str | None +gradio_server_port: int | None +gradio_max_new_tokens: int | None +gradio_temperature: float | None + +use_ray: bool = False +ray_run_name: str | None +ray_num_workers: int = 1 +resources_per_worker: dict + +# The size of the image to resize to. It can be an integer (resized into padded-square +# image) or a tuple (width, height).If not provided, we will attempt to load from +# preprocessor.size, otherwise, images won't be resized. +image_size: int | tuple[int, int] | None +# The resampling algorithm to use for image resizing. Default is bilinear. Please refer +# to PIL.Image.Resampling for more details. +image_resize_algorithm: Literal['bilinear', 'bicubic', 'lanczos'] | Resampling | None + +# optional overrides to the base model configuration +overrides_of_model_config: dict[str, Any] | None +# optional overrides the base model loading from_pretrained +overrides_of_model_kwargs: dict[str, Any] | None +# If you want to specify the type of model to load, AutoModelForCausalLM is a good +# choice too +type_of_model: str | None +# You can specify to choose a specific model revision from huggingface hub +revision_of_model: str | None + +max_packed_sequence_len: int | None +rope_scaling: Any | None +noisy_embedding_alpha: float | None +dpo_beta: float | None +evaluation_strategy: str | None diff --git a/docs/custom_integrations.html b/docs/custom_integrations.html index 989565971..f66137bd0 100644 --- a/docs/custom_integrations.html +++ b/docs/custom_integrations.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/dataset-formats/conversation.html b/docs/dataset-formats/conversation.html index 7dd8a4521..15fc4f38d 100644 --- a/docs/dataset-formats/conversation.html +++ b/docs/dataset-formats/conversation.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/dataset-formats/index.html b/docs/dataset-formats/index.html index 64c6b28cc..0320ad206 100644 --- a/docs/dataset-formats/index.html +++ b/docs/dataset-formats/index.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/dataset-formats/inst_tune.html b/docs/dataset-formats/inst_tune.html index 9f84a3c30..a6d0099c3 100644 --- a/docs/dataset-formats/inst_tune.html +++ b/docs/dataset-formats/inst_tune.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/dataset-formats/pretraining.html b/docs/dataset-formats/pretraining.html index 51a03be19..87def3245 100644 --- a/docs/dataset-formats/pretraining.html +++ b/docs/dataset-formats/pretraining.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/dataset-formats/stepwise_supervised.html b/docs/dataset-formats/stepwise_supervised.html index 
bcf822efc..022017667 100644 --- a/docs/dataset-formats/stepwise_supervised.html +++ b/docs/dataset-formats/stepwise_supervised.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/dataset-formats/template_free.html b/docs/dataset-formats/template_free.html index 9205718e8..16c243408 100644 --- a/docs/dataset-formats/template_free.html +++ b/docs/dataset-formats/template_free.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/dataset-formats/tokenized.html b/docs/dataset-formats/tokenized.html index 23606d97a..5e183b17f 100644 --- a/docs/dataset-formats/tokenized.html +++ b/docs/dataset-formats/tokenized.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/dataset_loading.html b/docs/dataset_loading.html index e2ef0e540..27d41af8b 100644 --- a/docs/dataset_loading.html +++ b/docs/dataset_loading.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/dataset_preprocessing.html b/docs/dataset_preprocessing.html index 27da414ef..75e3df0d5 100644 --- a/docs/dataset_preprocessing.html +++ b/docs/dataset_preprocessing.html @@ -391,6 +391,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/debugging.html b/docs/debugging.html index c628329fc..b23828631 100644 --- a/docs/debugging.html +++ b/docs/debugging.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/docker.html b/docs/docker.html index 32a1f4e02..64b1fb2d0 100644 --- a/docs/docker.html +++ b/docs/docker.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/faq.html b/docs/faq.html index 52c0c5769..69348becd 100644 --- a/docs/faq.html +++ b/docs/faq.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/fsdp_qlora.html b/docs/fsdp_qlora.html index 0332421ce..66d22c2fe 100644 --- a/docs/fsdp_qlora.html +++ b/docs/fsdp_qlora.html @@ -391,6 +391,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/getting-started.html b/docs/getting-started.html index 6d39b3b41..9d8cb0599 100644 --- a/docs/getting-started.html +++ b/docs/getting-started.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/gradient_checkpointing.html b/docs/gradient_checkpointing.html new file mode 100644 index 000000000..b413c8461 --- /dev/null +++ b/docs/gradient_checkpointing.html @@ -0,0 +1,928 @@ + + + + + + + + + +Gradient Checkpointing and Activation Offloading – Axolotl + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Gradient Checkpointing and Activation Offloading

    Gradient checkpointing and activation offloading reduce the GPU memory footprint of training: rather than keeping every intermediate activation in VRAM, activations are recomputed during the backward pass (checkpointing) or moved to CPU RAM or disk (offloading), trading a small amount of compute and transfer overhead for much lower peak memory.

    Enabling Gradient Checkpointing

    gradient_checkpointing: true
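    Beyond true/false, the config reference in this release also lists offload variants of gradient checkpointing ('offload', 'offload_disk'). A minimal sketch using those values:

    # recompute activations and offload the saved checkpoints to CPU RAM
    gradient_checkpointing: offload
    # or, when CPU RAM is also scarce, offload the checkpoints to disk
    # gradient_checkpointing: offload_disk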

    Enabling Activation Offloading

    gradient_checkpointing: true  # required for activation offloading
    activation_offloading: true

    Activation offloading variants:

    The default activation_offloading: true offloads activations to CPU and uses CUDA streams to overlap the offloading transfers with computation.

    The activation_offloading: legacy setting naively offloads activations to CPU without any additional optimizations.

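    A minimal config sketch selecting the legacy path:

    gradient_checkpointing: true  # required for activation offloading
    activation_offloading: legacy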

    For resource-constrained environments with limited CPU memory, activation_offloading: disk offloads activations to disk instead of CPU RAM, so that much larger context lengths can be trained with minimal memory.

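    A minimal config sketch for offloading activations to disk:

    gradient_checkpointing: true  # required for activation offloading
    activation_offloading: disk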
    + + + + + \ No newline at end of file diff --git a/docs/inference.html b/docs/inference.html index 6526b543f..bb8250d0e 100644 --- a/docs/inference.html +++ b/docs/inference.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/input_output.html b/docs/input_output.html index b46b9cc59..0827e1795 100644 --- a/docs/input_output.html +++ b/docs/input_output.html @@ -391,6 +391,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/installation.html b/docs/installation.html index b40992387..448546cb8 100644 --- a/docs/installation.html +++ b/docs/installation.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/lora_optims.html b/docs/lora_optims.html index 7060d98be..5d7e647ce 100644 --- a/docs/lora_optims.html +++ b/docs/lora_optims.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/lr_groups.html b/docs/lr_groups.html index 2ba536d27..49a60aab2 100644 --- a/docs/lr_groups.html +++ b/docs/lr_groups.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/mac.html b/docs/mac.html index ee65b7a0e..e29f9d1b7 100644 --- a/docs/mac.html +++ b/docs/mac.html @@ -391,6 +391,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/multi-gpu.html b/docs/multi-gpu.html index cefd66a25..32635dfaf 100644 --- a/docs/multi-gpu.html +++ b/docs/multi-gpu.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/multi-node.html b/docs/multi-node.html index 4f957626b..403d2a296 100644 --- a/docs/multi-node.html +++ b/docs/multi-node.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/multimodal.html b/docs/multimodal.html index 8165111c0..2ac254bc3 100644 --- a/docs/multimodal.html +++ b/docs/multimodal.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/multipack.html b/docs/multipack.html index 23be54a63..73b65e49c 100644 --- a/docs/multipack.html +++ b/docs/multipack.html @@ -391,6 +391,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/nccl.html b/docs/nccl.html index e42267cb7..c6a0e2577 100644 --- a/docs/nccl.html +++ b/docs/nccl.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/qat.html b/docs/qat.html index df7b81563..437daa36a 100644 --- a/docs/qat.html +++ b/docs/qat.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/quantize.html b/docs/quantize.html index 5293ee720..2a670807e 100644 --- a/docs/quantize.html +++ b/docs/quantize.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/ray-integration.html b/docs/ray-integration.html index 9c554ebf6..553ee50e2 100644 --- a/docs/ray-integration.html +++ b/docs/ray-integration.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/reward_modelling.html b/docs/reward_modelling.html index 9bbd83671..fe4527f35 100644 
--- a/docs/reward_modelling.html +++ b/docs/reward_modelling.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/rlhf.html b/docs/rlhf.html index 4b225aedb..83ddbbabb 100644 --- a/docs/rlhf.html +++ b/docs/rlhf.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/sequence_parallelism.html b/docs/sequence_parallelism.html index bc39d68dc..0d66f86d9 100644 --- a/docs/sequence_parallelism.html +++ b/docs/sequence_parallelism.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/torchao.html b/docs/torchao.html index c25c6e8a1..cc25bb37d 100644 --- a/docs/torchao.html +++ b/docs/torchao.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/docs/unsloth.html b/docs/unsloth.html index 6e0f29210..993e48071 100644 --- a/docs/unsloth.html +++ b/docs/unsloth.html @@ -426,6 +426,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/examples/colab-notebooks/colab-axolotl-example.html b/examples/colab-notebooks/colab-axolotl-example.html index d36e86550..c771d47a6 100644 --- a/examples/colab-notebooks/colab-axolotl-example.html +++ b/examples/colab-notebooks/colab-axolotl-example.html @@ -429,6 +429,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/index.html b/index.html index 7c1a5037b..db7f1af32 100644 --- a/index.html +++ b/index.html @@ -425,6 +425,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Sequence Parallelism + + diff --git a/search.json b/search.json index d2579d462..cd004aa35 100644 --- a/search.json +++ b/search.json @@ -466,14 +466,14 @@ "href": "docs/api/monkeypatch.gradient_checkpointing.offload_cpu.html", "title": "monkeypatch.gradient_checkpointing.offload_cpu", "section": "", - "text": "monkeypatch.gradient_checkpointing.offload_cpu\nCPU offloaded checkpointing\n\n\n\n\n\nName\nDescription\n\n\n\n\nCPU_Offloaded_Gradient_Checkpointer\nSaves VRAM by smartly offloading to RAM.\n\n\nCheckpointFunctionWithCPUOffload\nThis is a torch/utils/checkpoint.py CheckpointFunction monkey patch that offloads the first tensor to cpu during forward and back to cuda during backward. This allows significant memory savings when using a very long seqlen. e.g. for llama 8b at 100k it’s 24GB saved per gpu: ((100_000*4096)*2*32/2**30)\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_cpu.CPU_Offloaded_Gradient_Checkpointer(\n)\nSaves VRAM by smartly offloading to RAM.\nTiny hit to performance, since we mask the movement via non blocking calls.\n\n\n\nmonkeypatch.gradient_checkpointing.offload_cpu.CheckpointFunctionWithCPUOffload(\n)\nThis is a torch/utils/checkpoint.py CheckpointFunction monkey patch that offloads the first tensor to cpu during forward and back to cuda during backward. This allows significant memory savings when using a very long seqlen. e.g. for llama 8b at 100k it’s 24GB saved per gpu: ((100_000*4096)*2*32/2**30)\nIn the case of a very long seqlen 100k+ the copying to/from cpu overhead is not big, because dense quadratic attention compute will dominate." 
+ "text": "monkeypatch.gradient_checkpointing.offload_cpu\nCPU offloaded checkpointing\n\n\n\n\n\nName\nDescription\n\n\n\n\nCPU_Offloaded_Gradient_Checkpointer\nSaves VRAM by smartly offloading to RAM.\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_cpu.CPU_Offloaded_Gradient_Checkpointer(\n)\nSaves VRAM by smartly offloading to RAM.\nTiny hit to performance, since we mask the movement via non blocking calls." }, { "objectID": "docs/api/monkeypatch.gradient_checkpointing.offload_cpu.html#classes", "href": "docs/api/monkeypatch.gradient_checkpointing.offload_cpu.html#classes", "title": "monkeypatch.gradient_checkpointing.offload_cpu", "section": "", - "text": "Name\nDescription\n\n\n\n\nCPU_Offloaded_Gradient_Checkpointer\nSaves VRAM by smartly offloading to RAM.\n\n\nCheckpointFunctionWithCPUOffload\nThis is a torch/utils/checkpoint.py CheckpointFunction monkey patch that offloads the first tensor to cpu during forward and back to cuda during backward. This allows significant memory savings when using a very long seqlen. e.g. for llama 8b at 100k it’s 24GB saved per gpu: ((100_000*4096)*2*32/2**30)\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_cpu.CPU_Offloaded_Gradient_Checkpointer(\n)\nSaves VRAM by smartly offloading to RAM.\nTiny hit to performance, since we mask the movement via non blocking calls.\n\n\n\nmonkeypatch.gradient_checkpointing.offload_cpu.CheckpointFunctionWithCPUOffload(\n)\nThis is a torch/utils/checkpoint.py CheckpointFunction monkey patch that offloads the first tensor to cpu during forward and back to cuda during backward. This allows significant memory savings when using a very long seqlen. e.g. for llama 8b at 100k it’s 24GB saved per gpu: ((100_000*4096)*2*32/2**30)\nIn the case of a very long seqlen 100k+ the copying to/from cpu overhead is not big, because dense quadratic attention compute will dominate." + "text": "Name\nDescription\n\n\n\n\nCPU_Offloaded_Gradient_Checkpointer\nSaves VRAM by smartly offloading to RAM.\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_cpu.CPU_Offloaded_Gradient_Checkpointer(\n)\nSaves VRAM by smartly offloading to RAM.\nTiny hit to performance, since we mask the movement via non blocking calls." 
}, { "objectID": "docs/api/monkeypatch.gradient_checkpointing.offload_cpu.html#classes", "href": "docs/api/monkeypatch.gradient_checkpointing.offload_cpu.html#classes", "title": "monkeypatch.gradient_checkpointing.offload_cpu", "section": "", - "text": "Name\nDescription\n\n\n\n\nCPU_Offloaded_Gradient_Checkpointer\nSaves VRAM by smartly offloading to RAM.\n\n\nCheckpointFunctionWithCPUOffload\nThis is a torch/utils/checkpoint.py CheckpointFunction monkey patch that offloads the first tensor to cpu during forward and back to cuda during backward. This allows significant memory savings when using a very long seqlen. e.g. for llama 8b at 100k it’s 24GB saved per gpu: ((100_000*4096)*2*32/2**30)\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_cpu.CPU_Offloaded_Gradient_Checkpointer(\n)\nSaves VRAM by smartly offloading to RAM.\nTiny hit to performance, since we mask the movement via non blocking calls.\n\n\n\nmonkeypatch.gradient_checkpointing.offload_cpu.CheckpointFunctionWithCPUOffload(\n)\nThis is a torch/utils/checkpoint.py CheckpointFunction monkey patch that offloads the first tensor to cpu during forward and back to cuda during backward. This allows significant memory savings when using a very long seqlen. e.g. for llama 8b at 100k it’s 24GB saved per gpu: ((100_000*4096)*2*32/2**30)\nIn the case of a very long seqlen 100k+ the copying to/from cpu overhead is not big, because dense quadratic attention compute will dominate." + "text": "Name\nDescription\n\n\n\n\nCPU_Offloaded_Gradient_Checkpointer\nSaves VRAM by smartly offloading to RAM.\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_cpu.CPU_Offloaded_Gradient_Checkpointer(\n)\nSaves VRAM by smartly offloading to RAM.\nTiny hit to performance, since we mask the movement via non-blocking calls." }, { "objectID": "docs/api/common.architectures.html", @@ -1661,6 +1661,49 @@ "Quantization Aware Training (QAT)" ] }, + { + "objectID": "docs/gradient_checkpointing.html", + "href": "docs/gradient_checkpointing.html", + "title": "Gradient Checkpointing and Activation Offloading", + "section": "", + "text": "Gradient checkpointing and activation offloading are techniques that reduce the memory footprint of training deep learning\nmodels by recomputing or relocating activations instead of keeping them all in GPU memory.\n\nEnabling Gradient Checkpointing\ngradient_checkpointing: true\n\n\nEnabling Activation Offloading\ngradient_checkpointing: true # required for activation offloading\nactivation_offloading: true\nActivation offloading variants:\nThe default activation_offloading: true offloads activations to CPU and uses CUDA streams\nto overlap communication and computation when offloading.\nThe activation_offloading: legacy variant naively offloads activations to CPU, without additional optimizations.\nFor resource-constrained environments with limited CPU memory, activation_offloading: disk offloads\nactivations to disk instead of CPU RAM so that much larger context lengths can be trained with minimal memory.", + "crumbs": [ + "Advanced Features", + "Gradient Checkpointing and Activation Offloading" + ] + }, + { + "objectID": "docs/input_output.html", + "href": "docs/input_output.html", + "title": "Template-free prompt construction", + "section": "", + "text": "The documentation has moved here." + }, + { + "objectID": "src/axolotl/integrations/LICENSE.html", + "href": "src/axolotl/integrations/LICENSE.html", + "title": "Axolotl", + "section": "", + "text": "AXOLOTL COMMUNITY LICENSE AGREEMENT\nThis Axolotl Community License Agreement (“Agreement”) is entered into by and between Axolotl AI Corp. (“Axolotl”) and\nany individual or entity (“Licensee”) who wishes to use the Software (as defined below) in accordance with the terms\nand conditions set forth in this Agreement.\n\nDefinitions\n1.1 “Licensee” refers to any individual or entity who has obtained a copy of the Software under this Agreement.\n1.2 “Plugin Integration” means independent integration software modules which may or may not be offered by Axolotl,\nwhich may be licensed separately by their respective authors and/or licensors.\n1.3 “Software” refers to the specific sub-directory of the Axolotl, Inc.
software located at\nhttps://github.com/axolotl-ai-cloud/axolotl/tree/main/src/axolotl/integrations and its subdirectories which\npermits Plugin Integrations to integrate with the Axolotl service.\nGrant of License\n2.1 Axolotl hereby grants Licensee a worldwide, non-exclusive, royalty-free, license to use, copy, modify, merge,\npublish, distribute, sublicense, and/or otherwise exploit the Software, subject to the following conditions:\n- Licensee must comply with all the terms and conditions of this Agreement.\n- Licensee must include the original copyright notice and disclaimer of warranty in all copies or substantial\nportions of the Software.\n2.2 Licensee may use the Software for any lawful purpose, except as restricted in Section 3.\nRestrictions\n3.1 Licensee shall not use the Software for any activity that constitutes a commercial activity of offering for\nfree or for sale any services, platform, or equivalent to third parties for the purposes of allowing such\nthird parties to fine-tune artificial intelligence models.\n3.2 Licensee shall not:\n- Use the Software for any illegal or unauthorized purpose.\n- Reverse engineer, decompile, or disassemble the Software.\n- Remove or modify any copyright, trademark, or other proprietary notices contained in the Software.\n- Use the Software in a way that could damage, disable, overburden, or impair the functionality of the\nSoftware or interfere with any third-party use of the Software.\n3.3 Axolotl reserves the right to restrict certain Plugin Integrations for use with the Software. To the extent Licensee integrates a permitted, applicable Plugin Integration with the Software, Licensee shall comply with any additional terms and conditions imposed by the licensors of such Plugin Integration for use of such Plugin Integrations. Licensee shall contact Axolotl if it has questions about whether its use of the Software falls beyond the scope of this Agreement.\nIntellectual Property Rights\n4.1 Axolotl and its contributors retain all intellectual property rights in and to the Software. Licensee\nacknowledges that this Agreement does not transfer any ownership rights or intellectual property rights to\nLicensee.\nDisclaimer of Warranty\n5.1 THE SOFTWARE IS PROVIDED “AS IS,” WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\nTO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. IN NO EVENT SHALL\nTHE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF\nCONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\nTermination\n6.1 Axolotl may terminate this Agreement at any time if Licensee fails to comply with any of the terms and\nconditions set forth herein. Upon termination, Licensee shall cease all use of the Software and destroy any\ncopies in its possession.\nGoverning Law\n7.1 This Agreement shall be governed by and construed in accordance with the laws of the State of California,\nwithout regards to conflicts of laws provisions thereof.\nEntire Agreement\n8.1 This Agreement constitutes the entire agreement between Axolotl and Licensee with respect to the subject matter\nhereof and supersedes all prior or contemporaneous understandings or agreements between the parties concerning\nthe Software, whether written or oral. 
Axolotl may update the terms of this Agreement from time to time, and\nLicensee’s continued use of the Software after any such updates shall constitute acceptance of updated terms\non a go-forward basis. Axolotl will use commercially reasonable efforts to provide Licensee notice of any\nmaterial updates. By using the Software, Licensee acknowledges that it has read, understood, and agrees to be\nbound by the terms and conditions of this Agreement.\n\nThis Agreement was last updated on August 23, 2024." + }, + { + "objectID": "src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html", + "href": "src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html", + "title": "Axolotl", + "section": "", + "text": "Acknowledgements\nPortions of this Cut Cross Entropy Software may utilize the following copyrighted\nmaterial, the use of which is hereby acknowledged.\n\nPyTorch\nFrom PyTorch:\n\nCopyright (c) 2016- Facebook, Inc (Adam Paszke)\nCopyright (c) 2014- Facebook, Inc (Soumith Chintala)\nCopyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\nCopyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\nCopyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\nCopyright (c) 2011-2013 NYU (Clement Farabet)\nCopyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\nCopyright (c) 2006 Idiap Research Institute (Samy Bengio)\nCopyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n\nFrom Caffe2:\n\nCopyright (c) 2016-present, Facebook Inc. All rights reserved.\n\nAll contributions by Facebook:\nCopyright (c) 2016 Facebook Inc.\n\nAll contributions by Google:\nCopyright (c) 2015 Google Inc.\nAll rights reserved.\n\nAll contributions by Yangqing Jia:\nCopyright (c) 2015 Yangqing Jia\nAll rights reserved.\n\nAll contributions by Kakao Brain:\nCopyright 2019-2020 Kakao Brain\n\nAll contributions by Cruise LLC:\nCopyright (c) 2022 Cruise LLC.\nAll rights reserved.\n\nAll contributions by Arm:\nCopyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates\n\nAll contributions from Caffe:\nCopyright(c) 2013, 2014, 2015, the respective contributors\nAll rights reserved.\n\nAll other contributions:\nCopyright(c) 2015, 2016 the respective contributors\nAll rights reserved.\n\nCaffe2 uses a copyright model similar to Caffe: each contributor holds\ncopyright over their contributions to Caffe2. The project versioning records\nall such contribution and copyright details. If a contributor wants to further\nmark their specific copyright on a particular contribution, they should\nindicate their copyright solely in the commit message of the change when it is\ncommitted.\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\n3. 
Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America\nand IDIAP Research Institute nor the names of its contributors may be\nused to endorse or promote products derived from this software without\nspecific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\nTriton\n/*\n* Copyright 2018-2020 Philippe Tillet\n* Copyright 2020-2022 OpenAI\n*\n* Permission is hereby granted, free of charge, to any person obtaining\n* a copy of this software and associated documentation files\n* (the \"Software\"), to deal in the Software without restriction,\n* including without limitation the rights to use, copy, modify, merge,\n* publish, distribute, sublicense, and/or sell copies of the Software,\n* and to permit persons to whom the Software is furnished to do so,\n* subject to the following conditions:\n*\n* The above copyright notice and this permission notice shall be\n* included in all copies or substantial portions of the Software.\n*\n* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n*/\nTransformers\nCopyright 2018- The Hugging Face team. All rights reserved.\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License." 
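The new docs/gradient_checkpointing.html entry indexed above (just before these license entries) documents two config keys and three offloading variants. Collected into one YAML stanza for reference (the keys and values are the documented ones; the comments are mine):

    gradient_checkpointing: true     # required for any activation offloading
    activation_offloading: true      # default: CPU offload with CUDA-stream overlap

    # Documented alternatives to `true`:
    # activation_offloading: legacy  # naive CPU offload, no stream overlap
    # activation_offloading: disk    # offload to disk when CPU RAM is limited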
+ }, + { + "objectID": "docs/mac.html", + "href": "docs/mac.html", + "title": "Mac M-series", + "section": "", + "text": "Currently Axolotl on Mac is partially usable; many of the dependencies of Axolotl, including PyTorch, do not support MPS or have incomplete support.\nCurrent support:\n\nSupport for all models\nFull training of models\nLoRA training\nSample packing\nFP16 and BF16 (awaiting AMP support for MPS in PyTorch)\nTri-dao’s flash-attn (until it is supported, use sdp_attention as an alternative)\nxformers\nbitsandbytes (meaning no 4/8-bit loading and no bnb optimizers)\nqlora\nDeepSpeed\n\nUntested:\n\nFSDP", + "crumbs": [ + "Deployments", + "Mac M-series" + ] + }, { "objectID": "docs/lr_groups.html", "href": "docs/lr_groups.html", @@ -1694,38 +1737,6 @@ "Learning Rate Groups" ] }, - { - "objectID": "docs/mac.html", - "href": "docs/mac.html", - "title": "Mac M-series", - "section": "", - "text": "Currently Axolotl on Mac is partially usable, many of the dependencies of Axolotl including Pytorch do not support MPS or have incomplete support.\nCurrent support:\n\nSupport for all models\nFull training of models\nLoRA training\nSample packing\nFP16 and BF16 (awaiting AMP support for MPS in Pytorch)\nTri-dao’s flash-attn (until it is supported use spd_attention as an alternative)\nxformers\nbitsandbytes (meaning no 4/8 bits loading and bnb optimizers)\nqlora\nDeepSpeed\n\nUntested:\n\nFSDP", - "crumbs": [ - "Deployments", - "Mac M-series" - ] - }, - { - "objectID": "src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html", - "href": "src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html", - "title": "Axolotl", - "section": "", - "text": "Acknowledgements\nPortions of this Cut Cross Entropy Software may utilize the following copyrighted\nmaterial, the use of which is hereby acknowledged.\n\nPyTorch\nFrom PyTorch:\n\nCopyright (c) 2016- Facebook, Inc (Adam Paszke)\nCopyright (c) 2014- Facebook, Inc (Soumith Chintala)\nCopyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\nCopyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\nCopyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\nCopyright (c) 2011-2013 NYU (Clement Farabet)\nCopyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\nCopyright (c) 2006 Idiap Research Institute (Samy Bengio)\nCopyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n\nFrom Caffe2:\n\nCopyright (c) 2016-present, Facebook Inc. All rights reserved.\n\nAll contributions by Facebook:\nCopyright (c) 2016 Facebook Inc.\n\nAll contributions by Google:\nCopyright (c) 2015 Google Inc.\nAll rights reserved.\n\nAll contributions by Yangqing Jia:\nCopyright (c) 2015 Yangqing Jia\nAll rights reserved.\n\nAll contributions by Kakao Brain:\nCopyright 2019-2020 Kakao Brain\n\nAll contributions by Cruise LLC:\nCopyright (c) 2022 Cruise LLC.\nAll rights reserved.\n\nAll contributions by Arm:\nCopyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates\n\nAll contributions from Caffe:\nCopyright(c) 2013, 2014, 2015, the respective contributors\nAll rights reserved.\n\nAll other contributions:\nCopyright(c) 2015, 2016 the respective contributors\nAll rights reserved.\n\nCaffe2 uses a copyright model similar to Caffe: each contributor holds\ncopyright over their contributions to Caffe2. The project versioning records\nall such contribution and copyright details.
If a contributor wants to further\nmark their specific copyright on a particular contribution, they should\nindicate their copyright solely in the commit message of the change when it is\ncommitted.\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\n3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America\nand IDIAP Research Institute nor the names of its contributors may be\nused to endorse or promote products derived from this software without\nspecific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\nTriton\n/*\n* Copyright 2018-2020 Philippe Tillet\n* Copyright 2020-2022 OpenAI\n*\n* Permission is hereby granted, free of charge, to any person obtaining\n* a copy of this software and associated documentation files\n* (the \"Software\"), to deal in the Software without restriction,\n* including without limitation the rights to use, copy, modify, merge,\n* publish, distribute, sublicense, and/or sell copies of the Software,\n* and to permit persons to whom the Software is furnished to do so,\n* subject to the following conditions:\n*\n* The above copyright notice and this permission notice shall be\n* included in all copies or substantial portions of the Software.\n*\n* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n*/\nTransformers\nCopyright 2018- The Hugging Face team. All rights reserved.\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. 
Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License." 
- }, - { - "objectID": "src/axolotl/integrations/LICENSE.html", - "href": "src/axolotl/integrations/LICENSE.html", - "title": "Axolotl", - "section": "", - "text": "AXOLOTL COMMUNITY LICENSE AGREEMENT\nThis Axolotl Community License Agreement (“Agreement”) is entered into by and between Axolotl AI Corp. (“Axolotl”) and\nany individual or entity (“Licensee”) who wishes to use the Software (as defined below) in accordance with the terms\nand conditions set forth in this Agreement.\n\nDefinitions\n1.1 “Licensee” refers to any individual or entity who has obtained a copy of the Software under this Agreement.\n1.2 “Plugin Integration” means independent integration software modules which may or may not be offered by Axolotl,\nwhich may be licensed separately by their respective authors and/or licensors.\n1.3 “Software” refers to the specific sub-directory of the Axolotl, Inc. software located at\nhttps://github.com/axolotl-ai-cloud/axolotl/tree/main/src/axolotl/integrations and its subdirectories which\npermits Plugin Integrations to integrate with the Axolotl service.\nGrant of License\n2.1 Axolotl hereby grants Licensee a worldwide, non-exclusive, royalty-free, license to use, copy, modify, merge,\npublish, distribute, sublicense, and/or otherwise exploit the Software, subject to the following conditions:\n- Licensee must comply with all the terms and conditions of this Agreement.\n- Licensee must include the original copyright notice and disclaimer of warranty in all copies or substantial\nportions of the Software.\n2.2 Licensee may use the Software for any lawful purpose, except as restricted in Section 3.\nRestrictions\n3.1 Licensee shall not use the Software for any activity that constitutes a commercial activity of offering for\nfree or for sale any services, platform, or equivalent to third parties for the purposes of allowing such\nthird parties to fine-tune artificial intelligence models.\n3.2 Licensee shall not:\n- Use the Software for any illegal or unauthorized purpose.\n- Reverse engineer, decompile, or disassemble the Software.\n- Remove or modify any copyright, trademark, or other proprietary notices contained in the Software.\n- Use the Software in a way that could damage, disable, overburden, or impair the functionality of the\nSoftware or interfere with any third-party use of the Software.\n3.3 Axolotl reserves the right to restrict certain Plugin Integrations for use with the Software. To the extent Licensee integrates a permitted, applicable Plugin Integration with the Software, Licensee shall comply with any additional terms and conditions imposed by the licensors of such Plugin Integration for use of such Plugin Integrations. Licensee shall contact Axolotl if it has questions about whether its use of the Software falls beyond the scope of this Agreement.\nIntellectual Property Rights\n4.1 Axolotl and its contributors retain all intellectual property rights in and to the Software. Licensee\nacknowledges that this Agreement does not transfer any ownership rights or intellectual property rights to\nLicensee.\nDisclaimer of Warranty\n5.1 THE SOFTWARE IS PROVIDED “AS IS,” WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\nTO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. 
IN NO EVENT SHALL\nTHE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF\nCONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\nTermination\n6.1 Axolotl may terminate this Agreement at any time if Licensee fails to comply with any of the terms and\nconditions set forth herein. Upon termination, Licensee shall cease all use of the Software and destroy any\ncopies in its possession.\nGoverning Law\n7.1 This Agreement shall be governed by and construed in accordance with the laws of the State of California,\nwithout regards to conflicts of laws provisions thereof.\nEntire Agreement\n8.1 This Agreement constitutes the entire agreement between Axolotl and Licensee with respect to the subject matter\nhereof and supersedes all prior or contemporaneous understandings or agreements between the parties concerning\nthe Software, whether written or oral. Axolotl may update the terms of this Agreement from time to time, and\nLicensee’s continued use of the Software after any such updates shall constitute acceptance of updated terms\non a go-forward basis. Axolotl will use commercially reasonable efforts to provide Licensee notice of any\nmaterial updates. By using the Software, Licensee acknowledges that it has read, understood, and agrees to be\nbound by the terms and conditions of this Agreement.\n\nThis Agreement was last updated on August 23, 2024." - }, - { - "objectID": "docs/input_output.html", - "href": "docs/input_output.html", - "title": "Template-free prompt construction", - "section": "", - "text": "The documentation moved to here." - }, { "objectID": "docs/dataset_loading.html", "href": "docs/dataset_loading.html", @@ -2688,7 +2699,7 @@ "href": "docs/config-reference.html", "title": "Config Reference", "section": "", - "text": "# Allow overwrite yml config using from cli\nstrict: bool | None = False\n# Resume from a specific checkpoint dir\nresume_from_checkpoint: str | None\n# If resume_from_checkpoint isn't set and you simply want it to start where it left off.\n# Be careful with this being turned on between different models.\nauto_resume_from_checkpoints: bool | None\n# Resize the model embeddings when new tokens are added to multiples of 32. This is\n# reported to improve training speed on some models\nresize_token_embeddings_to_32x: bool | None\nmean_resizing_embeddings: bool | None = False\n\n# Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.\nshrink_embeddings: bool | None\n# Don't upcast the embeddings to float32 when using PEFT. Useful for low-VRAM GPUs\nembeddings_skip_upcast: bool | None\n\n# Use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo'\nrl: RLType | None\n\ntrl: TRLConfig | None\n # For TRLConfig:\n # Beta parameter for the RL training. Same as `rl_beta`. 
Use\n beta: float | None\n # Maximum length of the completion for RL training.\n max_completion_length: int | None\n\n # Whether to use VLLM for RL training.\n use_vllm: bool = False\n # VLLM mode to use, one of 'server' or 'colocate'\n vllm_mode: Literal['server', 'colocate'] | None\n # Host of the vLLM server to connect to.\n vllm_server_host: str | None = 0.0.0.0\n # Port of the vLLM server to connect to.\n vllm_server_port: int | None = 8000\n # Total timeout (in seconds) to wait for the vLLM server to respond.\n vllm_server_timeout: int | None\n # Regex for vLLM guided decoding.\n vllm_guided_decoding_regex: str | None\n\n # List of reward functions to load. Paths must be importable from current dir.\n reward_funcs: list[str] | None\n # List of reward weights for the reward functions.\n reward_weights: list[float] | None\n # Number of generations to sample.\n num_generations: int | None\n # Whether to log completions.\n log_completions: bool | None = False\n # Number of completions to print when log_completions is True.\n num_completions_to_print: int | None\n # Whether to sync the reference model.\n sync_ref_model: bool | None = False\n # Mixup alpha for the reference model.\n ref_model_mixup_alpha: float | None = 0.9\n # Sync steps for the reference model.\n ref_model_sync_steps: int | None = 64\n # Whether to scale rewards by their standard deviation.\n scale_rewards: bool = True\n\n # Sampling temperature for the GRPO policy.\n temperature: float | None\n # Top-p sampling probability for the generation policy.\n top_p: float | None\n # Top-k sampling for the generation policy.\n top_k: int | None\n # Minimum probability for the generation policy.\n min_p: float | None\n # Penalty for tokens that appear in prompt and generated text.\n repetition_penalty: float | None\n # Number of iterations per batch (μ) for GRPO.\n num_iterations: int | None\n # Epsilon value for clipping in the GRPO algorithm.\n epsilon: float | None\n # Upper-bound epsilon value for clipping in the GRPO algorithm.\n epsilon_high: float | None\n # Whether to use Liger loss for GRPO.\n use_liger_loss: bool | None\n # Loss formulation to use. Supported values: grpo, bnpo, dr_grpo.\n loss_type: str | None\n # Whether to exclude truncated completions from loss calculation.\n mask_truncated_completions: bool = False\n\nvllm: VllmConfig | None\n # For VllmConfig:\n # Device to use for VLLM\n device: str | None = auto\n # Tensor parallel size for VLLM\n tensor_parallel_size: int | None\n # Data parallel size for VLLM\n data_parallel_size: int | None\n # GPU memory utilization for VLLM\n gpu_memory_utilization: float | None = 0.9\n # Data type for VLLM\n dtype: str | None = auto\n # Maximum length of the model context for VLLM\n max_model_len: int | None\n # Enable prefix caching for VLLM\n enable_prefix_caching: bool | None\n # Host for the vLLM server to start on\n host: str | None = 0.0.0.0\n # Port of the vLLM server to start on\n port: int | None = 8000\n\n # Enable reasoning for VLLM\n enable_reasoning: bool | None\n # Reasoning parser for VLLM\n reasoning_parser: str | None\n\nqat: QATConfig | None\n # For QATConfig:\n # Fake quantization layout to use for activation quantization. Valid options are\n # \"int4\" and \"int8\"\n activation_dtype: TorchIntDType | None\n # Fake quantization layout to use for weight quantization. 
Valid options are \"int4\"\n # and \"int8\"\n weight_dtype: TorchIntDType = TorchIntDType.int8\n # Quantize embedding\n quantize_embedding: bool | None = False\n # The number of elements in each group for per-group fake quantization\n group_size: int | None = 32\n # The number of steps to apply fake quantization after\n fake_quant_after_n_steps: int | None\n\nquantization: PTQConfig | None\n # For PTQConfig:\n # Fake quantization layout to use for weight quantization. Valid options are uintX for\n # X in [1, 2, 3, 4, 5, 6, 7], or int4, or int8\n weight_dtype: TorchIntDType = TorchIntDType.int8\n # Fake quantization layout to use for activation quantization. Valid options are\n # \"int4\" and \"int8\"\n activation_dtype: TorchIntDType | None\n # Whether to quantize the embedding layer.\n quantize_embedding: bool | None\n # The number of elements in each group for per-group fake quantization\n group_size: int | None = 32\n\n# Reward modelling: `True` or `False`\nreward_model: bool | None\n# Process reward modelling: `True` or `False`\nprocess_reward_model: bool | None\nnum_labels: int | None\n\n# Whether to perform weighting in DPO trainer\ndpo_use_weighting: bool | None\ndpo_use_logits_to_keep: bool | None\ndpo_label_smoothing: float | None\ndpo_norm_loss: bool | None\ndpo_padding_free: bool | None\ndpo_generate_during_eval: bool | None\n\n# A list of one or more datasets to finetune the model with\ndatasets: Annotated[list[SFTDataset | DPODataset | KTODataset | StepwiseSupervisedDataset], MinLen(1)] | None\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. 
The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. 
If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. This parameter is ignored for local datasets.\n revision: str | None\n\n # For DPODataset:\n path: str | None\n split: str | None\n type: UserDefinedDPOType | str | None\n # For UserDefinedDPOType:\n field_system: str | None\n field_prompt: str | None\n field_chosen: str | None\n field_rejected: str | None\n prompt_format: str | None\n chosen_format: str | None\n rejected_format: str | None\n data_files: list[str] | None\n revision: str | None\n field_messages: str | None\n\n # For KTODataset:\n path: str | None\n split: str | None\n type: UserDefinedKTOType | str | None\n # For UserDefinedKTOType:\n field_system: str | None\n field_prompt: str | None\n field_completion: str | None\n field_label: bool | None\n prompt_format: str | None\n completion_format: str | None\n data_files: list[str] | None\n trust_remote_code: bool | None = False\n revision: str | None\n\n # For StepwiseSupervisedDataset:\n path: str | None\n split: str | None\n data_files: list[str] | None\n revision: str | None\n step_separator: str | None\n max_completion_length: int | None\n train_on_last_step_only: bool | None\n\n# A list of one or more datasets to eval the model with. You can use either\n# test_datasets, or val_set_size, but not both.\ntest_datasets: Annotated[list[SFTDataset | DPODataset | KTODataset | StepwiseSupervisedDataset], MinLen(1)] | None\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. 
This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. 
The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. This parameter is ignored for local datasets.\n revision: str | None\n\n # For DPODataset:\n path: str | None\n split: str | None\n type: UserDefinedDPOType | str | None\n # For UserDefinedDPOType:\n field_system: str | None\n field_prompt: str | None\n field_chosen: str | None\n field_rejected: str | None\n prompt_format: str | None\n chosen_format: str | None\n rejected_format: str | None\n data_files: list[str] | None\n revision: str | None\n field_messages: str | None\n\n # For KTODataset:\n path: str | None\n split: str | None\n type: UserDefinedKTOType | str | None\n # For UserDefinedKTOType:\n field_system: str | None\n field_prompt: str | None\n field_completion: str | None\n field_label: bool | None\n prompt_format: str | None\n completion_format: str | None\n data_files: list[str] | None\n trust_remote_code: bool | None = False\n revision: str | None\n\n # For StepwiseSupervisedDataset:\n path: str | None\n split: str | None\n data_files: list[str] | None\n revision: str | None\n step_separator: str | None\n max_completion_length: int | None\n train_on_last_step_only: bool | None\n\n# If false, the datasets will not be shuffled and will keep their original order in\n# `datasets`. The same applies to the `test_datasets` option and the\n# `pretraining_dataset` option. Default is true.\nshuffle_merged_datasets: bool | None = True\n# Axolotl attempts to save the dataset as an arrow after packing the data together so\n# subsequent training attempts load faster, relative path\ndataset_prepared_path: str | None\n# Num shards for whole dataset\ndataset_shard_num: int | None\n# Index of shard to use for whole dataset\ndataset_shard_idx: int | None\nskip_prepare_dataset: bool | None = False\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize\npretraining_dataset: Annotated[list[PretrainingDataset | SFTDataset], MinLen(1)] | None\n # For PretrainingDataset:\n name: str | None\n path: str | None\n split: str | None = train\n text_column: str | None = text\n type: str | None = pretrain\n trust_remote_code: bool | None = False\n data_files: str | None\n skip: int | None\n\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. 
[alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. 
The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. This parameter is ignored for local datasets.\n revision: str | None\n\n# The maximum number of processes to use while preprocessing your input dataset. This\n# defaults to `os.cpu_count()` if not set.\ndataset_processes: int | None = 4\n# Deduplicates datasets and test_datasets with identical entries\ndataset_exact_deduplication: bool | None\n# Keep dataset in memory while preprocessing. Only needed if cached dataset is taking\n# too much storage\ndataset_keep_in_memory: bool | None\ndataloader_pin_memory: bool | None\ndataloader_num_workers: int | None\ndataloader_prefetch_factor: int | None\ndataloader_drop_last: bool | None\n\naccelerator_config: dict[str, Any] | None\n\nremove_unused_columns: bool | None\n\n# Push prepared dataset to hub - repo_org/repo_name\npush_dataset_to_hub: str | None\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private\n# datasets. Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: bool | None\n\ndevice: Any | None\n# Passed through to transformers when loading the model when launched without\n# accelerate. Use `sequential` when training w/ model parallelism to limit memory\ndevice_map: Any | None\nworld_size: int | None\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank: int | None\nddp: bool | None\n\n# Seed for reproducibility\nseed: int | None\n# Advanced DDP Arguments - timeout\nddp_timeout: int | None\n# Advanced DDP Arguments - bucket cap in MB\nddp_bucket_cap_mb: int | None\n# Advanced DDP Arguments - broadcast buffers\nddp_broadcast_buffers: bool | None\nddp_find_unused_parameters: bool | None\n\n# Approximate number of predictions sent to wandb depending on batch size. Enabled above\n# 0. 
Default is 0\neval_table_size: int | None\n# Total number of tokens generated for predictions sent to wandb. Default is 128\neval_max_new_tokens: int | None\n# Whether to run causal language model evaluation for metrics in\n# `eval_causal_lm_metrics`\ndo_causal_lm_eval: bool | None\n# HF evaluate metrics used during evaluation. Default is ['sacrebleu', 'comet', 'ter',\n# 'chrf', 'perplexity']\neval_causal_lm_metrics: list[str] | None\ndo_bench_eval: bool | None\nbench_dataset: str | None\nbench_split: str | None\nmetric_for_best_model: str | None\ngreater_is_better: bool | None\n\n# High loss value, indicating the learning has broken down (a good estimate is ~2 times\n# the loss at the start of training)\nloss_watchdog_threshold: float | None\n# Number of high-loss steps in a row before the trainer aborts (default: 3)\nloss_watchdog_patience: int | None\n\ngc_steps: int | None\n\n# Use CUDA bf16. bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection.\n# require >=ampere\nbf16: Literal['auto'] | bool | None = auto\n# Use CUDA fp16\nfp16: bool | None\nfp8: bool | None\n# No AMP (automatic mixed precision) - require >=ampere\nbfloat16: bool | None\n# No AMP (automatic mixed precision)\nfloat16: bool | None\n# Use CUDA tf32 - require >=ampere\ntf32: bool | None\nfloat32: bool | None\n\n# Whether to use gradient checkpointing. Available options are: true, false, 'offload',\n# 'offload_disk'.\n# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: Literal['offload', 'offload_disk'] | bool | None = False\n# Additional kwargs to pass to the trainer for gradient checkpointing\ngradient_checkpointing_kwargs: dict[str, Any] | None\n\nunfrozen_parameters: list[str] | None\n\n# The maximum length of an input to train with; this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: int = 512\n# The maximum length of an input for evaluation. If not specified, defaults to\n# sequence_len\neval_sequence_len: int | None\nmin_sample_len: int | None\n# maximum prompt length for RL training\nmax_prompt_len: int = 512\n# Use efficient multi-packing with block diagonal attention and per sequence\n# position_ids. Recommended to set to 'true'\nsample_packing: bool | None\n# The number of samples packed at a time. Increasing the following values helps with\n# packing, but usually only slightly (<1%)\nsample_packing_group_size: int | None = 100000\n# The number of samples which can be packed into one sequence. Increase if using a large\n# sequence_len with many short samples.\nsample_packing_bin_size: int | None = 200\n# Whether to pack samples sequentially\nsample_packing_sequentially: bool | None\n# The multiprocessing start method to use for packing. Should be 'fork', 'spawn' or\n# 'forkserver'\nsample_packing_mp_start_method: str | None\n# Set to 'false' if getting errors during eval with sample_packing on\neval_sample_packing: bool | None
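A minimal sketch of how these length, packing, and precision options combine in a config (values are illustrative placeholders, not tuned recommendations):\n\nsequence_len: 4096\nsample_packing: true\neval_sample_packing: false\nbf16: auto\ngradient_checkpointing: true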
# Pad inputs so each step uses constant sized buffers. This will reduce memory\n# fragmentation and may prevent OOMs by re-using memory more efficiently\npad_to_sequence_len: bool | None\n# Whether to use sequential sampling for curriculum learning\ncurriculum_sampling: bool | None\nmultipack_real_batches: bool | None\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation: bool | None\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening: Literal['auto'] | bool | None\n\nuse_pose: bool | None\npose_split_on_token_ids: list[int] | None\npose_max_context_len: int | None\npose_num_chunks: int | None\n\npretrain_multipack_buffer_size: int | None = 10000\n# whether to prevent cross attention for packed sequences during pretraining\npretrain_multipack_attn: bool | None = True\n\n# Whether to use xformers attention patch https://github.com/facebookresearch/xformers\nxformers_attention: bool | None\n# Whether to use scaled-dot-product attention\n# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention: bool | None\n# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention: bool | None\nflex_attention: bool | None\nflex_attn_compile_kwargs: dict[str, Any] | None\n# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention\nflash_attention: bool | None\n# Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_cross_entropy: bool | None\n# Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_rms_norm: bool | None\n# Whether to fuse QKV into a single operation\nflash_attn_fuse_qkv: bool | None\n# Whether to fuse part of the MLP into a single operation\nflash_attn_fuse_mlp: bool | None\n# Whether to use bettertransformers\nflash_optimum: bool | None\n\neager_attention: bool | None\n\nunsloth_cross_entropy_loss: bool | None\nunsloth_lora_mlp: bool | None\nunsloth_lora_qkv: bool | None\nunsloth_lora_o: bool | None\nunsloth_rms_norm: bool | None\nunsloth_rope: bool | None\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_mlp_kernel: bool | None\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_qkv_kernel: bool | None\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_o_kernel: bool | None\n\n# Whether to use chunked cross entropy loss for memory efficiency\nchunked_cross_entropy: bool | None\n# Number of chunks to use for chunked cross entropy loss\nchunked_cross_entropy_num_chunks: int | None\n\n# Whether to use ALST tiled mlp for memory efficient long context\ntiled_mlp: bool | None\n\n# Number of shards to use for ALST tiled mlp. If unset, it will be set based on\n# seqlen/hidden_size\ntiled_mlp_num_shards: int | None\n\nllama4_linearized_experts: bool | None
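The attention patches above are alternative backends, so a config typically enables only one of them; a hedged sketch for Ampere-or-newer GPUs:\n\n# assumes the flash-attention package is installed; fall back to sdp_attention otherwise\nflash_attention: true\n# sdp_attention: true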
# Deepspeed config path. e.g., deepspeed_configs/zero3.json\ndeepspeed: str | dict[str, Any] | None\n# FSDP configuration\nfsdp: list[str] | None\n\n# FSDP configuration options\nfsdp_config: dict[str, Any] | None\n# FSDP version\nfsdp_version: int | None\nfsdp_final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None\n\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for\n# no eval.\nval_set_size: float | None = 0.0\n\n# Set to a divisor of the number of GPUs available to split sequences into chunks of\n# equal size. Use in long context training to prevent OOM when sequences cannot fit into\n# a single GPU's VRAM. E.g., if 4 GPUs are available, set this value to 2 to split each\n# sequence into two equal-sized subsequences, or set to 4 to split into four equal-sized\n# subsequences. See https://docs.axolotl.ai/docs/sequence_parallelism.html for more\n# details.\nsequence_parallel_degree: int | None\n# Optional; strides across the key dimension. Larger values use more memory but should\n# make training faster. Must evenly divide the number of KV heads in your model.\nheads_k_stride: int | None\n# One of 'varlen_llama3', 'batch_ring', 'batch_zigzag', 'batch_stripe'. Defaults to\n# 'varlen_llama3' in the sample packing case, and 'batch_ring' in the non-sample packing\n# case.\nring_attn_func: RingAttnFunc | None\n\n# Add or change special tokens. If you add tokens here, you don't need to add them to\n# the `tokens` list.\nspecial_tokens: SpecialTokensConfig | None\n # For SpecialTokensConfig:\n bos_token: str | None\n eos_token: str | None\n pad_token: str | None\n unk_token: str | None\n additional_special_tokens: list[str] | None\n\n# Add extra tokens to the tokenizer\ntokens: list[str] | None\n# Mapping token_id to new_token_string to override reserved added_tokens in the\n# tokenizer. Only works for tokens that are not part of the base vocab (aka are\n# added_tokens). Can be checked if they exist in tokenizer.json added_tokens.\nadded_tokens_overrides: dict[int, str] | None\n\n# Whether to use torch.compile and which backend to use. Setting to `auto` will enable\n# torch compile when torch>=2.6.0\ntorch_compile: Literal['auto'] | bool | None\n# Backend to use for torch.compile\ntorch_compile_backend: str | None\ntorch_compile_mode: Literal['default', 'reduce-overhead', 'max-autotune'] | None\n\n# Maximum number of iterations to train for. It precedes num_epochs which means that if\n# both are set, num_epochs will not be guaranteed. e.g., when 1 epoch is 1000 steps =>\n# `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps: int | None\n# Number of warmup steps. Cannot use with warmup_ratio\nwarmup_steps: int | None\n# Warmup ratio. Cannot use with warmup_steps\nwarmup_ratio: float | None\n# Leave empty to eval at each epoch, integer for every N steps, float for fraction of\n# total steps\neval_steps: int | float | None\n# Number of times per epoch to run evals, mutually exclusive with eval_steps\nevals_per_epoch: int | None\n# Set to `no` to skip evaluation, `epoch` at end of each epoch, leave empty to infer\n# from `eval_steps`\neval_strategy: str | None
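For instance, a sketch pairing a DeepSpeed ZeRO-3 config with a small held-out eval split and periodic evals (path and values illustrative):\n\ndeepspeed: deepspeed_configs/zero3.json\nval_set_size: 0.05\nwarmup_ratio: 0.03\nevals_per_epoch: 2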
# Leave empty to save at each epoch, integer for every N steps, float for fraction of\n# total steps\nsave_steps: int | float | None\n# Number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsaves_per_epoch: int | None\n# Set to `no` to skip checkpoint saves, `epoch` at end of each epoch, `best` when better\n# result is achieved, leave empty to infer from `save_steps`\nsave_strategy: str | None\n# Maximum number of checkpoints kept at a time\nsave_total_limit: int | None\n# Logging frequency\nlogging_steps: int | None\n# Stop training after this many evaluation losses have increased in a row.\n# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback\nearly_stopping_patience: int | None\nload_best_model_at_end: bool | None = False\n# Save only the model weights, skipping the optimizer. Using this means you can't resume\n# from checkpoints.\nsave_only_model: bool | None = False\n# Use tensorboard for logging\nuse_tensorboard: bool | None\n# Enable the pytorch profiler to capture the first N steps of training to the\n# output_dir. See https://pytorch.org/blog/understanding-gpu-memory-1/ for more\n# information. Snapshots can be visualized at https://pytorch.org/memory_viz\nprofiler_steps: int | None\n# Whether to include tokens per second in the training metrics. This\n# iterates over the entire dataset once, so it takes some time.\ninclude_tokens_per_second: bool | None\n\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to\n# add noise to embeddings. Currently only supported on Llama and Mistral\nneftune_noise_alpha: float | None\n\n# Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to\n# `beta` in `ORPOConfig` due to trl mapping.\norpo_alpha: float | None\n# Weighting of NLL term in loss from RPO paper\nrpo_alpha: float | None\n# Target reward margin for the SimPO loss\nsimpo_gamma: float | None\n# Weight of the BC regularizer\ncpo_alpha: float | None\n\n# Factor for desirable loss term in KTO loss\nkto_desirable_weight: float | None\n# Factor for undesirable loss term in KTO loss\nkto_undesirable_weight: float | None\n# The beta parameter for the RL training\nrl_beta: float | None\n\n# Defines the max memory usage per gpu on the system. Passed through to transformers\n# when loading the model.\nmax_memory: dict[int | Literal['cpu', 'disk'], int | str] | None\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in\n# gigabytes); default: unset\ngpu_memory_limit: int | str | None\n# Whether to use low_cpu_mem_usage\nlow_cpu_mem_usage: bool | None
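An illustrative checkpointing-and-early-stopping sketch (values are placeholders; early stopping assumes a matching eval cadence and a `metric_for_best_model`):\n\nsave_strategy: epoch\nsave_total_limit: 3\nlogging_steps: 10\nload_best_model_at_end: true\nearly_stopping_patience: 3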
# The name of the chat template to use for training, following values are supported:\n# tokenizer_default: Uses the chat template that is available in the\n# tokenizer_config.json. If the chat template is not available in the tokenizer, it will\n# raise an error. This is the default value.\n# alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n# are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n# tokenizer_default_fallback_*: where * is the name of the chat template to fallback to.\n# E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not\n# available in the tokenizer. jinja: Uses a custom jinja template for the chat template.\n# The custom jinja template should be provided in the chat_template_jinja field. The\n# selected chat template will be saved to the tokenizer_config.json for easier\n# inference\nchat_template: ChatTemplate | Annotated[str, StringConstraints(pattern='^tokenizer_default_fallback_')] | None\n# Custom jinja template or path to jinja file for chat template. This will only be used\n# if chat_template is set to `jinja` or `null` (in which case chat_template is\n# automatically set to `jinja`). Default is null.\nchat_template_jinja: str | None\n# Additional kwargs to pass to the chat template. This is useful for customizing the\n# chat template. For example, you can pass `thinking=False` to add a generation prompt\n# to the chat template.\nchat_template_kwargs: dict[str, Any] | None\n# Custom EOT (End-of-Turn) tokens to mask/unmask during training. These tokens mark the\n# boundaries between conversation turns. For example: ['/INST', '</s>',\n# '[/SYSTEM_PROMPT]']. If not specified, defaults to just the model's eos_token. This is\n# useful for templates that use multiple delimiter tokens.\neot_tokens: list[str] | None\n# Changes the default system message. Currently only supports chatml.\ndefault_system_message: str | None
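For example, a conversation dataset might select a built-in template, or bring a custom jinja file (the path below is hypothetical):\n\nchat_template: chatml\n# or supply your own template:\n# chat_template: jinja\n# chat_template_jinja: ./templates/my_template.jinja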
fix_untrained_tokens: int | list[int] | None\n\nis_preprocess: bool | None\npreprocess_iterable: bool | None\n\n# Total number of tokens - internal use\ntotal_num_tokens: int | None\ntotal_supervised_tokens: int | None\n# You can set these packing optimizations AFTER starting a training at least once. The\n# trainer will provide recommended values for these options.\nsample_packing_eff_est: float | None\naxolotl_config_path: str | None\n\n# Internal use only - Used to identify what the model is based on\nis_falcon_derived_model: bool | None\n# Internal use only - Used to identify what the model is based on\nis_llama_derived_model: bool | None\n# Internal use only - Used to identify what the model is based on. Please note that if\n# you set this to true, `padding_side` will be set to 'left' by default\nis_mistral_derived_model: bool | None\n# Internal use only - Used to identify what the model is based on\nis_qwen_derived_model: bool | None\n\n# Add plugins to extend the pipeline. See `src/axolotl/integrations` for the available\n# plugins or doc below for more details.\n# https://docs.axolotl.ai/docs/custom_integrations.html\nplugins: list[str] | None\n\n# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files. This\n# can also be a relative path to a model on disk\nbase_model: str (required)\n# If the base_model repo on hf hub doesn't include configuration .json files, you can\n# set that here, or leave this empty to default to base_model\nbase_model_config: str | None\ncls_model_config: str | None\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one defined in the base model\ntokenizer_config: str | None\n# use_fast option for tokenizer loading from_pretrained, defaults to True\ntokenizer_use_fast: bool | None\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy: bool | None\n# Whether to use mistral-common tokenizer. If set to True, it will use the mistral-\n# common tokenizer.\ntokenizer_use_mistral_common: bool | None\n# Corresponding tokenizer for the model. AutoTokenizer is a good choice\ntokenizer_type: str | None\n# transformers processor class\nprocessor_type: str | None\n# Trust remote code for untrusted source\ntrust_remote_code: bool | None\n\n# Where to save the full-finetuned model to\noutput_dir: str = ./model-out\n# push checkpoints to hub\nhub_model_id: str | None\n# how to push checkpoints to hub\nhub_strategy: str | None\n# Save model as safetensors (requires safetensors package). Default True\nsave_safetensors: bool | None = True\n\n# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer\nload_in_8bit: bool | None = False\n# Use bitsandbytes 4 bit\nload_in_4bit: bool | None = False\n\n# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in\n# original model\nadapter: str | None\n# If you already have a lora model trained that you want to load, put that here. This\n# means after training, if you want to test the model, you should set this to the value\n# of `output_dir`. Note that if you merge an adapter to the base model, a new\n# subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir: str | None\nlora_r: int | None\nlora_alpha: int | None\nlora_fan_in_fan_out: bool | None\nlora_target_modules: str | list[str] | None\n# If true, will target all linear modules\nlora_target_linear: bool | None\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules\n# because they need to know the new tokens. For LLaMA and Mistral, you need to save\n# `embed_tokens` and `lm_head`. It may vary for other models. `embed_tokens` converts\n# tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\nlora_modules_to_save: list[str] | None\nlora_dropout: float | None = 0.0\n# The layer indices to transform, otherwise, apply to all layers\npeft_layers_to_transform: list[int] | None\npeft_layers_pattern: list[str] | None\n\npeft: PeftConfig | None\n # For PeftConfig:\n # Configuration options for loftq initialization for LoRA\n loftq_config: LoftQConfig | None\n # For LoftQConfig:\n # typically 4 bits\n loftq_bits: int = 4\n\n# Whether to use DoRA.\npeft_use_dora: bool | None\n# Whether to use RSLoRA.\npeft_use_rslora: bool | None\n# List of layer indices to replicate.\npeft_layer_replication: list[tuple[int, int]] | None\n# How to initialize LoRA weights. Defaults to True, which is the MS original implementation.\npeft_init_lora_weights: bool | str | None\n\n# load qlora model in sharded format for FSDP using answer.ai technique.\nqlora_sharded_model_loading: bool | None = False\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it\n# takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge\nlora_on_cpu: bool | None\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: bool | None\n# optional overrides to the bnb 4bit quantization configuration\nbnb_config_kwargs: dict[str, Any] | None\n\n# loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.\nloraplus_lr_ratio: float | None
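A common QLoRA-style sketch assembled from the options above (rank, alpha, and dropout are illustrative, not recommendations):\n\nload_in_4bit: true\nadapter: qlora\nlora_r: 32\nlora_alpha: 16\nlora_dropout: 0.05\nlora_target_linear: true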
# loraplus learning rate for lora embedding layers. Default value is 1e-6.\nloraplus_lr_embedding: float | None = 1e-06\n\nmerge_lora: bool | None\n\n# Number of steps per ReLoRA restart\nrelora_steps: int | None\n# Number of per-restart warmup steps\nrelora_warmup_steps: int | None\n# Number of anneal steps for each relora cycle\nrelora_anneal_steps: int | None\n# threshold for optimizer magnitude when pruning\nrelora_prune_ratio: float | None\n# True to perform lora weight merges on cpu during restarts, for modest gpu memory\n# savings\nrelora_cpu_offload: bool | None\n\n# If greater than 1, backpropagation will be skipped and the gradients will be\n# accumulated for the given number of steps.\ngradient_accumulation_steps: int | None = 1\n# The number of samples to include in each batch. This is the number of samples sent to\n# each GPU. Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: int | None = 1\n# Total batch size, we do not recommend setting this manually\nbatch_size: int | None\n# per gpu micro batch size for evals, defaults to value of micro_batch_size\neval_batch_size: int | None\n\n# whether to find batch size that fits in memory. Passed to underlying transformers\n# Trainer\nauto_find_batch_size: bool | None\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: bool | None = False\n# Group similarly sized data to minimize padding. May be slower to start, as it must\n# download and sort the entire dataset. Note that training loss may have an oscillating\n# pattern with this enabled.\ngroup_by_length: bool | None\n\nlearning_rate: str | float (required)\nembedding_lr: float | None\nembedding_lr_scale: float | None\n# Specify weight decay\nweight_decay: float | None = 0.0\n# Specify optimizer\noptimizer: OptimizerNames | CustomSupportedOptimizers | None = OptimizerNames.ADAMW_TORCH_FUSED\n# Dictionary of arguments to pass to the optimizer\noptim_args: str | dict[str, Any] | None\n# The target modules to optimize, i.e. the module names that you would like to train,\n# right now this is used only for GaLore algorithm\noptim_target_modules: list[str] | Literal['all_linear'] | None\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path: str | None\nlr_scheduler: SchedulerType | Literal['one_cycle'] | Literal['rex'] | None = SchedulerType.COSINE\n# Specify a scheduler and kwargs to use with the optimizer\nlr_scheduler_kwargs: dict[str, Any] | None\nlr_quadratic_warmup: bool | None\n# decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of\n# peak lr\ncosine_min_lr_ratio: float | None
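An illustrative optimizer-and-schedule block; note that the effective global batch size works out to micro_batch_size * gradient_accumulation_steps * number of GPUs (2 * 8 * 4 = 64 on four GPUs here):\n\nmicro_batch_size: 2\ngradient_accumulation_steps: 8\nlearning_rate: 2e-5\noptimizer: adamw_torch_fused\nlr_scheduler: cosine\ncosine_min_lr_ratio: 0.1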
# freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means\n# start cosine_min_lr at 80% of training step\ncosine_constant_lr_ratio: float | None\n# Learning rate div factor\nlr_div_factor: float | None\n\nlr_groups: list[LrGroup] | None\n # For LrGroup:\n name: str (required)\n modules: list[str] (required)\n lr: float (required)\n\n# adamw hyperparams\nadam_epsilon: float | None\n# only used for CAME Optimizer\nadam_epsilon2: float | None\n# adamw hyperparams\nadam_beta1: float | None\n# adamw hyperparams\nadam_beta2: float | None\n# only used for CAME Optimizer\nadam_beta3: float | None\n# Gradient clipping max norm\nmax_grad_norm: float | None\nnum_epochs: float = 1.0\n\nuse_wandb: bool | None\n# Set the name of your wandb run\nwandb_name: str | None\n# Set the ID of your wandb run\nwandb_run_id: str | None\n# \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn\n# off wandb\nwandb_mode: str | None\n# Your wandb project name\nwandb_project: str | None\n# A wandb Team name if using a Team\nwandb_entity: str | None\nwandb_watch: str | None\n# \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only\n# at the end of training\nwandb_log_model: str | None\n\nuse_mlflow: bool | None\n# URI to mlflow\nmlflow_tracking_uri: str | None\n# Your experiment name\nmlflow_experiment_name: str | None\n# Your run name\nmlflow_run_name: str | None\n# set to true to copy each saved checkpoint on each save to mlflow artifact registry\nhf_mlflow_log_artifacts: bool | None\n\n# Enable or disable Comet integration.\nuse_comet: bool | None\n# API key for Comet. Recommended to set via `comet login`.\ncomet_api_key: str | None\n# Workspace name in Comet. Defaults to the user's default workspace.\ncomet_workspace: str | None\n# Project name in Comet. Defaults to Uncategorized.\ncomet_project_name: str | None\n# Identifier for the experiment. Used to append data to an existing experiment or\n# control the key of new experiments. Defaults to a random key.\ncomet_experiment_key: str | None\n# Create a new experiment (\"create\") or log to an existing one (\"get\"). Default\n# (\"get_or_create\") auto-selects based on configuration.\ncomet_mode: str | None\n# Set to True to log data to Comet server, or False for offline storage. Default is\n# True.\ncomet_online: bool | None\n# Dictionary for additional configuration settings, see the doc for more details.\ncomet_experiment_config: dict[str, Any] | None
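A minimal experiment-tracking sketch, assuming a wandb account (project and run names are placeholders):\n\nuse_wandb: true\nwandb_project: my-project\nwandb_name: qlora-run-1\nwandb_log_model: end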
# the number of active layers in LISA\nlisa_n_layers: int | None\n# how often to switch layers in LISA\nlisa_step_interval: int | None\n# path under the model to access the layers\nlisa_layers_attribute: str | None = model.layers\n\ngradio_title: str | None\ngradio_share: bool | None\ngradio_server_name: str | None\ngradio_server_port: int | None\ngradio_max_new_tokens: int | None\ngradio_temperature: float | None\n\nuse_ray: bool = False\nray_run_name: str | None\nray_num_workers: int = 1\nresources_per_worker: dict\n\n# The size of the image to resize to. It can be an integer (resized into padded-square\n# image) or a tuple (width, height). If not provided, we will attempt to load from\n# preprocessor.size, otherwise, images won't be resized.\nimage_size: int | tuple[int, int] | None\n# The resampling algorithm to use for image resizing. Default is bilinear. Please refer\n# to PIL.Image.Resampling for more details.\nimage_resize_algorithm: Literal['bilinear', 'bicubic', 'lanczos'] | Resampling | None\n\n# optional overrides to the base model configuration\noverrides_of_model_config: dict[str, Any] | None\n# optional overrides to the base model loading from_pretrained\noverrides_of_model_kwargs: dict[str, Any] | None\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good\n# choice too\ntype_of_model: str | None\n# You can specify a specific model revision from huggingface hub\nrevision_of_model: str | None\n\nmax_packed_sequence_len: int | None\nrope_scaling: Any | None\nnoisy_embedding_alpha: float | None\ndpo_beta: float | None\nevaluation_strategy: str | None
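Putting a few of these pieces together, a minimal end-to-end config might look like the following sketch (the model id and dataset path are placeholders and the values are untuned):\n\nbase_model: your-org/your-base-model\ndatasets:\n  - path: ./data/train.jsonl\n    type: alpaca\noutput_dir: ./model-out\nsequence_len: 2048\nmicro_batch_size: 1\ngradient_accumulation_steps: 4\nnum_epochs: 1.0\nlearning_rate: 2e-5\nbf16: auto\nflash_attention: true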
This parameter is ignored for local datasets.\n revision: str | None\n\n # For DPODataset:\n path: str | None\n split: str | None\n type: UserDefinedDPOType | str | None\n # For UserDefinedDPOType:\n field_system: str | None\n field_prompt: str | None\n field_chosen: str | None\n field_rejected: str | None\n prompt_format: str | None\n chosen_format: str | None\n rejected_format: str | None\n data_files: list[str] | None\n revision: str | None\n field_messages: str | None\n\n # For KTODataset:\n path: str | None\n split: str | None\n type: UserDefinedKTOType | str | None\n # For UserDefinedKTOType:\n field_system: str | None\n field_prompt: str | None\n field_completion: str | None\n field_label: bool | None\n prompt_format: str | None\n completion_format: str | None\n data_files: list[str] | None\n trust_remote_code: bool | None = False\n revision: str | None\n\n # For StepwiseSupervisedDataset:\n path: str | None\n split: str | None\n data_files: list[str] | None\n revision: str | None\n step_separator: str | None\n max_completion_length: int | None\n train_on_last_step_only: bool | None\n\n# If false, the datasets will not be shuffled and will keep their original order in\n# `datasets`. The same applies to the `test_datasets` option and the\n# `pretraining_dataset` option. Default is true.\nshuffle_merged_datasets: bool | None = True\n# Axolotl attempts to save the dataset as an arrow after packing the data together so\n# subsequent training attempts load faster, relative path\ndataset_prepared_path: str | None\n# Num shards for whole dataset\ndataset_shard_num: int | None\n# Index of shard to use for whole dataset\ndataset_shard_idx: int | None\nskip_prepare_dataset: bool | None = False\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize\npretraining_dataset: Annotated[list[PretrainingDataset | SFTDataset], MinLen(1)] | None\n # For PretrainingDataset:\n name: str | None\n path: str | None\n split: str | None = train\n text_column: str | None = text\n type: str | None = pretrain\n trust_remote_code: bool | None = False\n data_files: str | None\n skip: int | None\n\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. 
This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. 
The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. This parameter is ignored for local datasets.\n revision: str | None\n\n# The maximum number of processes to use while preprocessing your input dataset. This\n# defaults to `os.cpu_count()` if not set.\ndataset_processes: int | None = 4\n# Deduplicates datasets and test_datasets with identical entries\ndataset_exact_deduplication: bool | None\n# Keep dataset in memory while preprocessing. Only needed if cached dataset is taking\n# too much storage\ndataset_keep_in_memory: bool | None\ndataloader_pin_memory: bool | None\ndataloader_num_workers: int | None\ndataloader_prefetch_factor: int | None\ndataloader_drop_last: bool | None\n\naccelerator_config: dict[str, Any] | None\n\nremove_unused_columns: bool | None\n\n# Push prepared dataset to hub - repo_org/repo_name\npush_dataset_to_hub: str | None\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private\n# datasets. Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: bool | None\n\ndevice: Any | None\n# Passed through to transformers when loading the model when launched without\n# accelerate. Use `sequential` when training w/ model parallelism to limit memory\ndevice_map: Any | None\nworld_size: int | None\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank: int | None\nddp: bool | None\n\n# Seed for reproducibility\nseed: int | None\n# Advanced DDP Arguments - timeout\nddp_timeout: int | None\n# Advanced DDP Arguments - bucket cap in MB\nddp_bucket_cap_mb: int | None\n# Advanced DDP Arguments - broadcast buffers\nddp_broadcast_buffers: bool | None\nddp_find_unused_parameters: bool | None\n\n# Approximate number of predictions sent to wandb depending on batch size. Enabled above\n# 0. Default is 0\neval_table_size: int | None\n# Total number of tokens generated for predictions sent to wandb. Default is 128\neval_max_new_tokens: int | None\n# Whether to run causal language model evaluation for metrics in\n# `eval_causal_lm_metrics`\ndo_causal_lm_eval: bool | None\n# HF evaluate metrics used during evaluation. Default is ['sacrebleu', 'comet', 'ter',\n# 'chrf', 'perplexity']\neval_causal_lm_metrics: list[str] | None\ndo_bench_eval: bool | None\nbench_dataset: str | None\nbench_split: str | None\nmetric_for_best_model: str | None\ngreater_is_better: bool | None\n\n# High loss value, indicating the learning has broken down (a good estimate is ~2 times\n# the loss at the start of training)\nloss_watchdog_threshold: float | None\n# Number of high-loss steps in a row before the trainer aborts (default: 3)\nloss_watchdog_patience: int | None\n\n# Run garbage collection every `gc_steps` steps. -1 will run on epoch end and before\n# evaluations. 
Default is 0 (disabled).\ngc_steps: int | None\n\n# Use CUDA bf16. bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection.\n# require >=ampere\nbf16: Literal['auto'] | bool | None = auto\n# Use CUDA fp16\nfp16: bool | None\nfp8: bool | None\n# No AMP (automatic mixed precision) - require >=ampere\nbfloat16: bool | None\n# No AMP (automatic mixed precision)\nfloat16: bool | None\n# Use CUDA tf32 - require >=ampere\ntf32: bool | None\nfloat32: bool | None\n\n# Whether to use gradient checkpointing. Available options are: true, false, 'offload',\n# 'offload_disk'.\n# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: Literal['offload', 'offload_disk'] | bool | None = False\n# Additional kwargs to pass to the trainer for gradient checkpointing\ngradient_checkpointing_kwargs: dict[str, Any] | None\n# Whether to offload activations. Available options are: true, false, 'legacy', 'disk'.\nactivation_offloading: Literal['legacy', 'disk'] | bool | None = False\n\nunfrozen_parameters: list[str] | None\n\n# The maximum length of an input to train with; this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: int = 512\n# The maximum length of an input for evaluation. If not specified, defaults to\n# sequence_len\neval_sequence_len: int | None\nmin_sample_len: int | None\n# maximum prompt length for RL training\nmax_prompt_len: int = 512\n# Use efficient multi-packing with block diagonal attention and per sequence\n# position_ids. Recommended to set to 'true'\nsample_packing: bool | None\n# The number of samples packed at a time. Increasing the following values helps with\n# packing, but usually only slightly (<1%).\nsample_packing_group_size: int | None = 100000\n# The number of samples which can be packed into one sequence. Increase if using a large\n# sequence_len with many short samples.\nsample_packing_bin_size: int | None = 200\n# Whether to pack samples sequentially\nsample_packing_sequentially: bool | None\n# The multiprocessing start method to use for packing. Should be 'fork', 'spawn' or\n# 'forkserver'\nsample_packing_mp_start_method: str | None\n# Set to 'false' if getting errors during eval with sample_packing on\neval_sample_packing: bool | None\n# Pad inputs so each step uses constant sized buffers. 
This will reduce memory\n# fragmentation and may prevent OOMs by re-using memory more efficiently\npad_to_sequence_len: bool | None\n# Whether to use sequential sampling for curriculum learning\ncurriculum_sampling: bool | None\nmultipack_real_batches: bool | None\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation: bool | None\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening: Literal['auto'] | bool | None\n\nuse_pose: bool | None\npose_split_on_token_ids: list[int] | None\npose_max_context_len: int | None\npose_num_chunks: int | None\n\npretrain_multipack_buffer_size: int | None = 10000\n# whether to prevent cross attention for packed sequences during pretraining\npretrain_multipack_attn: bool | None = True\n\n# Whether to use xformers attention patch https://github.com/facebookresearch/xformers\nxformers_attention: bool | None\n# Whether to use scaled-dot-product attention\n# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention: bool | None\n# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention: bool | None\nflex_attention: bool | None\nflex_attn_compile_kwargs: dict[str, Any] | None\n# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention\nflash_attention: bool | None\n# Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_cross_entropy: bool | None\n# Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_rms_norm: bool | None\n# Whether to fuse QKV into a single operation\nflash_attn_fuse_qkv: bool | None\n# Whether to fuse part of the MLP into a single operation\nflash_attn_fuse_mlp: bool | None\n# Whether to use bettertransformers\nflash_optimum: bool | None\n\neager_attention: bool | None\n\nunsloth_cross_entropy_loss: bool | None\nunsloth_lora_mlp: bool | None\nunsloth_lora_qkv: bool | None\nunsloth_lora_o: bool | None\nunsloth_rms_norm: bool | None\nunsloth_rope: bool | None\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_mlp_kernel: bool | None\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_qkv_kernel: bool | None\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_o_kernel: bool | None\n\n# Whether to use chunked cross entropy loss for memory efficiency\nchunked_cross_entropy: bool | None\n# Number of chunks to use for chunked cross entropy loss\nchunked_cross_entropy_num_chunks: int | None\n\n# Whether to use ALST tiled mlp for memory efficient long context\ntiled_mlp: bool | None\n\n# Number of shards to use for ALST tiled mlp. If unset, it will be set based on\n# seqlen/hidden_size\ntiled_mlp_num_shards: int | None\n\nllama4_linearized_experts: bool | None\n\n# Deepspeed config path. 
e.g., deepspeed_configs/zero3.json\ndeepspeed: str | dict[str, Any] | None\n# FSDP configuration\nfsdp: list[str] | None\n\n# FSDP configuration options\nfsdp_config: dict[str, Any] | None\n# FSDP version\nfsdp_version: int | None\nfsdp_final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None\n\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for\n# no eval.\nval_set_size: float | None = 0.0\n\n# Set to a divisor of the number of GPUs available to split sequences into chunks of\n# equal size. Use in long context training to prevent OOM when sequences cannot fit into\n# a single GPU's VRAM. E.g., if 4 GPUs are available, set this value to 2 to split each\n# sequence into two equal-sized subsequences, or set to 4 to split into four equal-sized\n# subsequences. See https://docs.axolotl.ai/docs/sequence_parallelism.html for more\n# details.\nsequence_parallel_degree: int | None\n# Optional; strides across the key dimension. Larger values use more memory but should\n# make training faster. Must evenly divide the number of KV heads in your model.\nheads_k_stride: int | None\n# One of 'varlen_llama3', 'batch_ring', 'batch_zigzag', 'batch_stripe'. Defaults to\n# 'varlen_llama3' in the sample packing case, and 'batch_ring' in the non-sample packing\n# case.\nring_attn_func: RingAttnFunc | None\n\n# Add or change special tokens. If you add tokens here, you don't need to add them to\n# the `tokens` list.\nspecial_tokens: SpecialTokensConfig | None\n # For SpecialTokensConfig:\n bos_token: str | None\n eos_token: str | None\n pad_token: str | None\n unk_token: str | None\n additional_special_tokens: list[str] | None\n\n# Add extra tokens to the tokenizer\ntokens: list[str] | None\n# Mapping token_id to new_token_string to override reserved added_tokens in the\n# tokenizer. Only works for tokens that are not part of the base vocab (aka are\n# added_tokens). You can check if they exist in tokenizer.json added_tokens.\nadded_tokens_overrides: dict[int, str] | None\n\n# Whether to use torch.compile and which backend to use. Setting to `auto` will enable\n# torch compile when torch>=2.6.0\ntorch_compile: Literal['auto'] | bool | None\n# Backend to use for torch.compile\ntorch_compile_backend: str | None\ntorch_compile_mode: Literal['default', 'reduce-overhead', 'max-autotune'] | None\n\n# Maximum number of iterations to train for. It precedes num_epochs which means that if\n# both are set, num_epochs will not be guaranteed. e.g., when 1 epoch is 1000 steps =>\n# `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps: int | None\n# Number of warmup steps. Cannot use with warmup_ratio\nwarmup_steps: int | None\n# Warmup ratio. Cannot use with warmup_steps\nwarmup_ratio: float | None\n# Leave empty to eval at each epoch, integer for every N steps. float for fraction of\n# total steps\neval_steps: int | float | None\n# Number of times per epoch to run evals, mutually exclusive with eval_steps\nevals_per_epoch: int | None\n# Set to `no` to skip evaluation, `epoch` at end of each epoch, leave empty to infer\n# from `eval_steps`\neval_strategy: str | None\n# Leave empty to save at each epoch, integer for every N steps. 
float for fraction of\n# total steps\nsave_steps: int | float | None\n# Number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsaves_per_epoch: int | None\n# Set to `no` to skip checkpoint saves, `epoch` at end of each epoch, `best` when better\n# result is achieved, leave empty to infer from `save_steps`\nsave_strategy: str | None\n# Maximum number of checkpoints kept at a time\nsave_total_limit: int | None\n# Logging frequency\nlogging_steps: int | None\n# Stop training after this many evaluation losses have increased in a row.\n# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback\nearly_stopping_patience: int | None\nload_best_model_at_end: bool | None = False\n# Save only the model weights, skipping the optimizer. Using this means you can't resume\n# from checkpoints.\nsave_only_model: bool | None = False\n# Use tensorboard for logging\nuse_tensorboard: bool | None\n# Enable the pytorch profiler to capture the first N steps of training to the\n# output_dir. See https://pytorch.org/blog/understanding-gpu-memory-1/ for more\n# information. Snapshots can be visualized @ https://pytorch.org/memory_viz\nprofiler_steps: int | None\n# Which step to start the profiler at. Useful for only capturing a few steps mid-run.\nprofiler_steps_start: int | None = 0\n# Whether to include tokens trained per second in the training metrics. This iterates\n# over the entire dataset once, so it takes some time.\ninclude_tokens_per_second: bool | None\n\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to\n# add noise to embeddings. Currently only supported on Llama and Mistral\nneftune_noise_alpha: float | None\n\n# Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to\n# `beta` in `ORPOConfig` due to trl mapping.\norpo_alpha: float | None\n# Weighting of NLL term in loss from RPO paper\nrpo_alpha: float | None\n# Target reward margin for the SimPO loss\nsimpo_gamma: float | None\n# Weight of the BC regularizer\ncpo_alpha: float | None\n\n# Factor for desirable loss term in KTO loss\nkto_desirable_weight: float | None\n# Factor for undesirable loss term in KTO loss\nkto_undesirable_weight: float | None\n# The beta parameter for the RL training\nrl_beta: float | None\n\n# Defines the max memory usage per gpu on the system. Passed through to transformers\n# when loading the model.\nmax_memory: dict[int | Literal['cpu', 'disk'], int | str] | None\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in\n# gigabytes); default: unset\ngpu_memory_limit: int | str | None\n# Whether to use low_cpu_mem_usage\nlow_cpu_mem_usage: bool | None\n\n# The name of the chat template to use for training, following values are supported:\n# tokenizer_default: Uses the chat template that is available in the\n# tokenizer_config.json. If the chat template is not available in the tokenizer, it will\n# raise an error. This is the default value.\n# alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n# are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n# tokenizer_default_fallback_*: where * is the name of the chat template to fallback to.\n# E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not\n# available in the tokenizer. jinja: Uses a custom jinja template for the chat template.\n# The custom jinja template should be provided in the chat_template_jinja field. 
The\n# selected chat template will be saved to the tokenizer_config.json for easier\n# inference\nchat_template: ChatTemplate | Annotated[str, StringConstraints(pattern='^tokenizer_default_fallback_')] | None\n# Custom jinja template or path to jinja file for chat template. This will only be used\n# if chat_template is set to `jinja` or `null` (in which case chat_template is\n# automatically set to `jinja`). Default is null.\nchat_template_jinja: str | None\n# Additional kwargs to pass to the chat template. This is useful for customizing the\n# chat template. For example, you can pass `thinking=False` to add a generation prompt\n# to the chat template.\nchat_template_kwargs: dict[str, Any] | None\n# Custom EOT (End-of-Turn) tokens to mask/unmask during training. These tokens mark the\n# boundaries between conversation turns. For example: ['/INST', '</s>',\n# '[/SYSTEM_PROMPT]']. If not specified, defaults to just the model's eos_token. This is\n# useful for templates that use multiple delimiter tokens.\neot_tokens: list[str] | None\n# Changes the default system message. Currently only supports chatml.\ndefault_system_message: str | None\n\nfix_untrained_tokens: int | list[int] | None\n\nis_preprocess: bool | None\npreprocess_iterable: bool | None\n\n# Total number of tokens - internal use\ntotal_num_tokens: int | None\ntotal_supervised_tokens: int | None\n# You can set these packing optimizations AFTER starting a training run at least once.\n# The trainer will provide recommended values for these settings.\nsample_packing_eff_est: float | None\naxolotl_config_path: str | None\n\n# Internal use only - Used to identify which model family the model is based on\nis_falcon_derived_model: bool | None\n# Internal use only - Used to identify which model family the model is based on\nis_llama_derived_model: bool | None\n# Internal use only - Used to identify which model family the model is based on. Please\n# note that if you set this to true, `padding_side` will be set to 'left' by default\nis_mistral_derived_model: bool | None\n# Internal use only - Used to identify which model family the model is based on\nis_qwen_derived_model: bool | None\n\n# Add plugins to extend the pipeline. See `src/axolotl/integrations` for the available\n# plugins, or the doc below for more details.\n# https://docs.axolotl.ai/docs/custom_integrations.html\nplugins: list[str] | None\n\n# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files. This\n# can also be a relative path to a model on disk\nbase_model: str (required)\n# If the base_model repo on hf hub doesn't include configuration .json files, you can\n# set that here, or leave this empty to default to base_model\nbase_model_config: str | None\ncls_model_config: str | None\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one defined in the base model\ntokenizer_config: str | None\n# use_fast option for tokenizer loading from_pretrained, defaults to True\ntokenizer_use_fast: bool | None\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy: bool | None\n# Whether to use the mistral-common tokenizer. 
\ntokenizer_use_mistral_common: bool | None\n# Corresponding tokenizer for the model; AutoTokenizer is a good choice\ntokenizer_type: str | None\n# transformers processor class\nprocessor_type: str | None\n# Trust remote code for untrusted source\ntrust_remote_code: bool | None\n\n# Where to save the fully finetuned model to\noutput_dir: str = ./model-out\n# push checkpoints to hub\nhub_model_id: str | None\n# how to push checkpoints to hub\nhub_strategy: str | None\n# Save model as safetensors (requires safetensors package). Default True\nsave_safetensors: bool | None = True\n\n# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer\nload_in_8bit: bool | None = False\n# Use bitsandbytes 4 bit\nload_in_4bit: bool | None = False\n\n# Set to 'lora' or 'qlora', or leave blank to train all parameters in the\n# original model\nadapter: str | None\n# If you already have a lora model trained that you want to load, put that here. This\n# means after training, if you want to test the model, you should set this to the value\n# of `output_dir`. Note that if you merge an adapter to the base model, a new\n# subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir: str | None\nlora_r: int | None\nlora_alpha: int | None\nlora_fan_in_fan_out: bool | None\nlora_target_modules: str | list[str] | None\n# If true, will target all linear modules\nlora_target_linear: bool | None\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules\n# because they need to know the new tokens. For LLaMA and Mistral, you need to save\n# `embed_tokens` and `lm_head`. It may vary for other models. `embed_tokens` converts\n# tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\nlora_modules_to_save: list[str] | None\nlora_dropout: float | None = 0.0\n# The layer indices to transform; otherwise, apply to all layers\npeft_layers_to_transform: list[int] | None\npeft_layers_pattern: list[str] | None\n\npeft: PeftConfig | None\n # For PeftConfig:\n # Configuration options for loftq initialization for LoRA\n loftq_config: LoftQConfig | None\n # For LoftQConfig:\n # typically 4 bits\n loftq_bits: int = 4\n\n# Whether to use DoRA.\npeft_use_dora: bool | None\n# Whether to use RSLoRA.\npeft_use_rslora: bool | None\n# List of layer indices to replicate.\npeft_layer_replication: list[tuple[int, int]] | None\n# How to initialize LoRA weights. Defaults to True, which is the MS original\n# implementation.\npeft_init_lora_weights: bool | str | None\n\n# load qlora model in sharded format for FSDP using answer.ai technique.\nqlora_sharded_model_loading: bool | None = False\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it\n# takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge\nlora_on_cpu: bool | None\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: bool | None\n# optional overrides to the bnb 4bit quantization configuration\nbnb_config_kwargs: dict[str, Any] | None\n\n# loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.\nloraplus_lr_ratio: float | None\n# loraplus learning rate for lora embedding layers. 
Default value is 1e-6.\nloraplus_lr_embedding: float | None = 1e-06\n\nmerge_lora: bool | None\n\n# Number of steps per ReLoRA restart\nrelora_steps: int | None\n# Number of per-restart warmup steps\nrelora_warmup_steps: int | None\n# Number of anneal steps for each relora cycle\nrelora_anneal_steps: int | None\n# threshold for optimizer magnitude when pruning\nrelora_prune_ratio: float | None\n# True to perform lora weight merges on cpu during restarts, for modest gpu memory\n# savings\nrelora_cpu_offload: bool | None\n\n# If greater than 1, the optimizer step will be skipped and the gradients will be\n# accumulated for the given number of steps.\ngradient_accumulation_steps: int | None = 1\n# The number of samples to include in each batch. This is the number of samples sent to\n# each GPU. Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: int | None = 1\n# Total batch size; we do not recommend setting this manually\nbatch_size: int | None\n# per gpu micro batch size for evals, defaults to value of micro_batch_size\neval_batch_size: int | None\n\n# Whether to find a batch size that fits in memory. Passed to the underlying\n# transformers Trainer\nauto_find_batch_size: bool | None\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: bool | None = False\n# Group similarly sized data to minimize padding. May be slower to start, as it must\n# download and sort the entire dataset. Note that training loss may have an oscillating\n# pattern with this enabled.\ngroup_by_length: bool | None\n\nlearning_rate: str | float (required)\nembedding_lr: float | None\nembedding_lr_scale: float | None\n# Specify weight decay\nweight_decay: float | None = 0.0\n# Specify optimizer\noptimizer: OptimizerNames | CustomSupportedOptimizers | None = OptimizerNames.ADAMW_TORCH_FUSED\n# Dictionary of arguments to pass to the optimizer\noptim_args: str | dict[str, Any] | None\n# The target modules to optimize, i.e. the module names that you would like to train;\n# right now this is used only for the GaLore algorithm\noptim_target_modules: list[str] | Literal['all_linear'] | None\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path: str | None\nlr_scheduler: SchedulerType | Literal['one_cycle'] | Literal['rex'] | None = SchedulerType.COSINE\n# Specify a scheduler and kwargs to use with the optimizer\nlr_scheduler_kwargs: dict[str, Any] | None\nlr_quadratic_warmup: bool | None\n# decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of\n# peak lr\ncosine_min_lr_ratio: float | None\n# freeze lr at some percentage of the step, e.g. 
cosine_constant_lr_ratio=0.8 means\n# start cosine_min_lr at 80% of training steps\ncosine_constant_lr_ratio: float | None\n# Learning rate div factor\nlr_div_factor: float | None\n\nlr_groups: list[LrGroup] | None\n # For LrGroup:\n name: str (required)\n modules: list[str] (required)\n lr: float (required)\n\n# adamw hyperparams\nadam_epsilon: float | None\n# only used for CAME Optimizer\nadam_epsilon2: float | None\n# adamw hyperparams\nadam_beta1: float | None\n# adamw hyperparams\nadam_beta2: float | None\n# only used for CAME Optimizer\nadam_beta3: float | None\n# Gradient clipping max norm\nmax_grad_norm: float | None\nnum_epochs: float = 1.0\n\nuse_wandb: bool | None\n# Set the name of your wandb run\nwandb_name: str | None\n# Set the ID of your wandb run\nwandb_run_id: str | None\n# \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn\n# off wandb\nwandb_mode: str | None\n# Your wandb project name\nwandb_project: str | None\n# A wandb Team name if using a Team\nwandb_entity: str | None\nwandb_watch: str | None\n# \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only\n# at the end of training\nwandb_log_model: str | None\n\nuse_mlflow: bool | None\n# URI to mlflow\nmlflow_tracking_uri: str | None\n# Your experiment name\nmlflow_experiment_name: str | None\n# Your run name\nmlflow_run_name: str | None\n# set to true to copy each saved checkpoint on each save to mlflow artifact registry\nhf_mlflow_log_artifacts: bool | None\n\n# Enable or disable Comet integration.\nuse_comet: bool | None\n# API key for Comet. Recommended to set via `comet login`.\ncomet_api_key: str | None\n# Workspace name in Comet. Defaults to the user's default workspace.\ncomet_workspace: str | None\n# Project name in Comet. Defaults to Uncategorized.\ncomet_project_name: str | None\n# Identifier for the experiment. Used to append data to an existing experiment or\n# control the key of new experiments. Defaults to a random key.\ncomet_experiment_key: str | None\n# Create a new experiment (\"create\") or log to an existing one (\"get\"). Default\n# (\"get_or_create\") auto-selects based on configuration.\ncomet_mode: str | None\n# Set to True to log data to Comet server, or False for offline storage. Default is\n# True.\ncomet_online: bool | None\n# Dictionary for additional configuration settings, see the doc for more details.\ncomet_experiment_config: dict[str, Any] | None\n\n# the number of active layers in LISA\nlisa_n_layers: int | None\n# how often to switch layers in LISA\nlisa_step_interval: int | None\n# path under the model to access the layers\nlisa_layers_attribute: str | None = model.layers\n\ngradio_title: str | None\ngradio_share: bool | None\ngradio_server_name: str | None\ngradio_server_port: int | None\ngradio_max_new_tokens: int | None\ngradio_temperature: float | None\n\nuse_ray: bool = False\nray_run_name: str | None\nray_num_workers: int = 1\nresources_per_worker: dict\n\n# The size of the image to resize to. It can be an integer (resized into padded-square\n# image) or a tuple (width, height). If not provided, we will attempt to load from\n# preprocessor.size; otherwise, images won't be resized.\nimage_size: int | tuple[int, int] | None\n# The resampling algorithm to use for image resizing. Default is bilinear. 
Please refer\n# to PIL.Image.Resampling for more details.\nimage_resize_algorithm: Literal['bilinear', 'bicubic', 'lanczos'] | Resampling | None\n\n# optional overrides to the base model configuration\noverrides_of_model_config: dict[str, Any] | None\n# optional overrides the base model loading from_pretrained\noverrides_of_model_kwargs: dict[str, Any] | None\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good\n# choice too\ntype_of_model: str | None\n# You can specify to choose a specific model revision from huggingface hub\nrevision_of_model: str | None\n\nmax_packed_sequence_len: int | None\nrope_scaling: Any | None\nnoisy_embedding_alpha: float | None\ndpo_beta: float | None\nevaluation_strategy: str | None", "crumbs": [ "Getting Started", "Config Reference" @@ -2804,14 +2815,14 @@ "href": "docs/api/utils.callbacks.profiler.html", "title": "utils.callbacks.profiler", "section": "", - "text": "utils.callbacks.profiler\nHF Trainer callback for creating pytorch profiling snapshots\n\n\n\n\n\nName\nDescription\n\n\n\n\nPytorchProfilerCallback\nPyTorch Profiler callback to create snapshots of GPU memory usage at specified steps.\n\n\n\n\n\nutils.callbacks.profiler.PytorchProfilerCallback(steps_to_profile=5)\nPyTorch Profiler callback to create snapshots of GPU memory usage at specified steps." + "text": "utils.callbacks.profiler\nHF Trainer callback for creating pytorch profiling snapshots\n\n\n\n\n\nName\nDescription\n\n\n\n\nPytorchProfilerCallback\nPyTorch Profiler callback to create snapshots of GPU memory usage at specified steps.\n\n\n\n\n\nutils.callbacks.profiler.PytorchProfilerCallback(\n steps_to_profile=5,\n profiler_steps_start=0,\n)\nPyTorch Profiler callback to create snapshots of GPU memory usage at specified steps." }, { "objectID": "docs/api/utils.callbacks.profiler.html#classes", "href": "docs/api/utils.callbacks.profiler.html#classes", "title": "utils.callbacks.profiler", "section": "", - "text": "Name\nDescription\n\n\n\n\nPytorchProfilerCallback\nPyTorch Profiler callback to create snapshots of GPU memory usage at specified steps.\n\n\n\n\n\nutils.callbacks.profiler.PytorchProfilerCallback(steps_to_profile=5)\nPyTorch Profiler callback to create snapshots of GPU memory usage at specified steps." + "text": "Name\nDescription\n\n\n\n\nPytorchProfilerCallback\nPyTorch Profiler callback to create snapshots of GPU memory usage at specified steps.\n\n\n\n\n\nutils.callbacks.profiler.PytorchProfilerCallback(\n steps_to_profile=5,\n profiler_steps_start=0,\n)\nPyTorch Profiler callback to create snapshots of GPU memory usage at specified steps." 
}, { "objectID": "docs/api/utils.schemas.enums.html", diff --git a/sitemap.xml b/sitemap.xml index ae9afad7f..4675d45df 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -2,758 +2,762 @@ https://docs.axolotl.ai/TODO.html - 2025-07-14T18:19:27.651Z + 2025-07-15T00:11:41.062Z https://docs.axolotl.ai/index.html - 2025-07-14T18:19:27.671Z + 2025-07-15T00:11:41.082Z https://docs.axolotl.ai/docs/debugging.html - 2025-07-14T18:19:27.653Z + 2025-07-15T00:11:41.064Z https://docs.axolotl.ai/docs/amd_hpc.html - 2025-07-14T18:19:27.652Z + 2025-07-15T00:11:41.063Z https://docs.axolotl.ai/docs/api/utils.callbacks.mlflow_.html - 2025-07-14T18:22:46.179Z + 2025-07-15T00:14:48.504Z https://docs.axolotl.ai/docs/api/monkeypatch.llama_expand_mask.html - 2025-07-14T18:22:45.612Z + 2025-07-15T00:14:47.943Z https://docs.axolotl.ai/docs/api/loaders.patch_manager.html - 2025-07-14T18:22:45.224Z + 2025-07-15T00:14:47.557Z https://docs.axolotl.ai/docs/api/core.chat.format.llama3x.html - 2025-07-14T18:22:44.911Z + 2025-07-15T00:14:47.255Z https://docs.axolotl.ai/docs/api/cli.train.html - 2025-07-14T18:22:44.965Z + 2025-07-15T00:14:47.308Z https://docs.axolotl.ai/docs/api/utils.callbacks.perplexity.html - 2025-07-14T18:22:46.170Z + 2025-07-15T00:14:48.496Z https://docs.axolotl.ai/docs/api/core.chat.messages.html - 2025-07-14T18:22:44.908Z + 2025-07-15T00:14:47.252Z https://docs.axolotl.ai/docs/api/utils.callbacks.lisa.html - 2025-07-14T18:22:46.175Z + 2025-07-15T00:14:48.501Z https://docs.axolotl.ai/docs/api/cli.merge_sharded_fsdp_weights.html - 2025-07-14T18:22:45.053Z + 2025-07-15T00:14:47.394Z https://docs.axolotl.ai/docs/api/cli.sweeps.html - 2025-07-14T18:22:45.068Z + 2025-07-15T00:14:47.408Z https://docs.axolotl.ai/docs/api/utils.chat_templates.html - 2025-07-14T18:22:45.710Z + 2025-07-15T00:14:48.039Z https://docs.axolotl.ai/docs/api/core.chat.format.shared.html - 2025-07-14T18:22:44.913Z + 2025-07-15T00:14:47.257Z https://docs.axolotl.ai/docs/api/core.trainers.mixins.optimizer.html - 2025-07-14T18:22:45.231Z + 2025-07-15T00:14:47.565Z https://docs.axolotl.ai/docs/api/utils.collators.mamba.html - 2025-07-14T18:22:46.118Z + 2025-07-15T00:14:48.444Z https://docs.axolotl.ai/docs/api/logging_config.html - 2025-07-14T18:22:44.857Z + 2025-07-15T00:14:47.202Z https://docs.axolotl.ai/docs/api/utils.collators.mm_chat.html - 2025-07-14T18:22:46.123Z + 2025-07-15T00:14:48.449Z https://docs.axolotl.ai/docs/api/prompt_strategies.completion.html - 2025-07-14T18:22:45.352Z + 2025-07-15T00:14:47.683Z https://docs.axolotl.ai/docs/api/kernels.utils.html - 2025-07-14T18:22:45.562Z + 2025-07-15T00:14:47.893Z https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.chat_template.html - 2025-07-14T18:22:45.385Z + 2025-07-15T00:14:47.716Z https://docs.axolotl.ai/docs/api/kernels.swiglu.html - 2025-07-14T18:22:45.553Z + 2025-07-15T00:14:47.884Z https://docs.axolotl.ai/docs/api/common.const.html - 2025-07-14T18:22:46.079Z + 2025-07-15T00:14:48.405Z https://docs.axolotl.ai/docs/api/cli.cloud.base.html - 2025-07-14T18:22:45.116Z + 2025-07-15T00:14:47.449Z https://docs.axolotl.ai/docs/api/utils.callbacks.comet_.html - 2025-07-14T18:22:46.182Z + 2025-07-15T00:14:48.508Z https://docs.axolotl.ai/docs/api/monkeypatch.gradient_checkpointing.offload_cpu.html - 2025-07-14T18:22:45.676Z + 2025-07-15T00:14:48.006Z https://docs.axolotl.ai/docs/api/common.architectures.html - 2025-07-14T18:22:46.077Z + 2025-07-15T00:14:48.404Z https://docs.axolotl.ai/docs/api/prompt_strategies.pygmalion.html - 2025-07-14T18:22:45.379Z + 2025-07-15T00:14:47.710Z 
https://docs.axolotl.ai/docs/api/utils.schemas.peft.html - 2025-07-14T18:22:45.884Z + 2025-07-15T00:14:48.212Z https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.user_defined.html - 2025-07-14T18:22:45.408Z + 2025-07-15T00:14:47.739Z https://docs.axolotl.ai/docs/api/utils.schemas.datasets.html - 2025-07-14T18:22:45.875Z + 2025-07-15T00:14:48.204Z https://docs.axolotl.ai/docs/api/prompt_strategies.alpaca_w_system.html - 2025-07-14T18:22:45.325Z + 2025-07-15T00:14:47.657Z https://docs.axolotl.ai/docs/api/prompt_strategies.base.html - 2025-07-14T18:22:45.265Z + 2025-07-15T00:14:47.599Z https://docs.axolotl.ai/docs/api/utils.lora.html - 2025-07-14T18:22:45.715Z + 2025-07-15T00:14:48.044Z https://docs.axolotl.ai/docs/api/prompt_strategies.input_output.html - 2025-07-14T18:22:45.358Z + 2025-07-15T00:14:47.689Z https://docs.axolotl.ai/docs/api/utils.schemas.trl.html - 2025-07-14T18:22:45.887Z + 2025-07-15T00:14:48.215Z https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.zephyr.html - 2025-07-14T18:22:45.406Z + 2025-07-15T00:14:47.738Z https://docs.axolotl.ai/docs/api/integrations.kd.trainer.html - 2025-07-14T18:22:46.066Z + 2025-07-15T00:14:48.392Z https://docs.axolotl.ai/docs/api/monkeypatch.gradient_checkpointing.offload_disk.html - 2025-07-14T18:22:45.701Z + 2025-07-15T00:14:48.031Z https://docs.axolotl.ai/docs/api/utils.optimizers.adopt.html - 2025-07-14T18:22:45.804Z + 2025-07-15T00:14:48.132Z https://docs.axolotl.ai/docs/api/monkeypatch.data.batch_dataset_fetcher.html - 2025-07-14T18:22:45.669Z + 2025-07-15T00:14:48.001Z https://docs.axolotl.ai/docs/api/cli.cloud.modal_.html - 2025-07-14T18:22:45.122Z + 2025-07-15T00:14:47.455Z https://docs.axolotl.ai/docs/api/prompt_strategies.alpaca_chat.html - 2025-07-14T18:22:45.311Z + 2025-07-15T00:14:47.644Z https://docs.axolotl.ai/docs/api/utils.freeze.html - 2025-07-14T18:22:45.731Z + 2025-07-15T00:14:48.061Z https://docs.axolotl.ai/docs/api/prompt_strategies.bradley_terry.llama3.html - 2025-07-14T18:22:45.450Z + 2025-07-15T00:14:47.782Z https://docs.axolotl.ai/docs/api/integrations.base.html - 2025-07-14T18:22:46.054Z + 2025-07-15T00:14:48.381Z https://docs.axolotl.ai/docs/api/monkeypatch.unsloth_.html - 2025-07-14T18:22:45.668Z + 2025-07-15T00:14:47.999Z https://docs.axolotl.ai/docs/api/prompt_strategies.kto.chatml.html - 2025-07-14T18:22:45.425Z + 2025-07-15T00:14:47.756Z https://docs.axolotl.ai/docs/api/cli.main.html - 2025-07-14T18:22:44.957Z + 2025-07-15T00:14:47.300Z https://docs.axolotl.ai/docs/api/common.datasets.html - 2025-07-14T18:22:46.094Z + 2025-07-15T00:14:48.420Z https://docs.axolotl.ai/docs/api/train.html - 2025-07-14T18:22:44.771Z + 2025-07-15T00:14:47.118Z https://docs.axolotl.ai/docs/api/core.trainers.base.html - 2025-07-14T18:22:45.137Z + 2025-07-15T00:14:47.470Z https://docs.axolotl.ai/docs/api/core.trainers.mixins.scheduler.html - 2025-07-14T18:22:45.241Z + 2025-07-15T00:14:47.574Z https://docs.axolotl.ai/docs/api/utils.ctx_managers.sequence_parallel.html - 2025-07-14T18:22:45.264Z + 2025-07-15T00:14:47.597Z https://docs.axolotl.ai/docs/api/utils.schemas.config.html - 2025-07-14T18:22:45.846Z + 2025-07-15T00:14:48.175Z https://docs.axolotl.ai/docs/api/loaders.tokenizer.html - 2025-07-14T18:22:45.209Z + 2025-07-15T00:14:47.543Z https://docs.axolotl.ai/docs/api/integrations.liger.args.html - 2025-07-14T18:22:46.069Z + 2025-07-15T00:14:48.396Z https://docs.axolotl.ai/docs/api/cli.config.html - 2025-07-14T18:22:45.018Z + 2025-07-15T00:14:47.360Z https://docs.axolotl.ai/docs/api/loaders.processor.html - 
[sitemap.xml diff, continued. The <url>, <loc>, and <lastmod> markup was stripped when this patch was flattened, leaving bare URLs and timestamps run together. The remaining hunks all follow one pattern: each page's <lastmod> value is bumped from the 2025-07-14 build to the 2025-07-15 build. API reference pages (docs/api/*) move from 2025-07-14T18:22:44-46Z to 2025-07-15T00:14:47-48Z; guide pages (docs/*, examples/*, FAQS.html) move from 2025-07-14T18:19:27Z to 2025-07-15T00:11:41Z; docs/config-reference.html moves from 2025-07-14T18:22:57.686Z to 2025-07-15T00:15:00.449Z. Roughly 140 entries are touched, spanning the docs.axolotl.ai API reference and guide pages.]

[Two structural changes accompany the timestamp bumps. First, the sitemap gains one wholly new entry for the new Gradient Checkpointing page; with the stripped tags restored, the added lines read:]

+  <url>
+    <loc>https://docs.axolotl.ai/docs/gradient_checkpointing.html</loc>
+    <lastmod>2025-07-15T00:11:41.064Z</lastmod>
+  </url>

[Second, the existing entries for docs/lr_groups.html, docs/mac.html, src/axolotl/integrations/LICENSE.html, and src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html are removed and re-inserted at different positions with updated lastmod values (2025-07-15T00:11:41.067Z for the first two, 2025-07-15T00:11:41.086Z for the last two); for these four pages the change is a reordering plus the same timestamp bump.]
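[For reference, each timestamp-only hunk summarized above would have had the following shape before the tags were stripped. This is a sketch assuming the standard sitemaps.org <urlset> layout that Quarto emits; the URL and both timestamp values are taken verbatim from the flattened text, using the monkeypatch.utils.html entry as the example:]

   <url>
     <loc>https://docs.axolotl.ai/docs/api/monkeypatch.utils.html</loc>
-    <lastmod>2025-07-14T18:22:45.648Z</lastmod>
+    <lastmod>2025-07-15T00:14:47.980Z</lastmod>
   </url>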
diff --git a/src/axolotl/integrations/LICENSE.html b/src/axolotl/integrations/LICENSE.html
index 1d8b6f4e6..2def09051 100644
--- a/src/axolotl/integrations/LICENSE.html
+++ b/src/axolotl/integrations/LICENSE.html
@@ -390,6 +390,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});
 Sequence Parallelism
[six "+" lines follow: the markup inserted after the Sequence Parallelism entry, likely sidebar navigation, was stripped when the patch was flattened, leaving only bare "+" markers; the surrounding context lines lost their markup the same way]

diff --git a/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html b/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html
index d7790a4ff..b8f2d845f 100644
--- a/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html
+++ b/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html
@@ -390,6 +390,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});
 Sequence Parallelism
[six "+" lines follow, stripped as above]
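[The content of those six added lines is not recoverable from this flattened patch. Given that the same six-line insertion lands in every page's navigation region right after the Sequence Parallelism entry, and that the sitemap above gains a docs/gradient_checkpointing.html entry, a plausible guess is a sidebar link to the new Gradient Checkpointing page. The sketch below is purely hypothetical: the class names, nesting, and href are illustrative of Quarto-style sidebar markup, not taken from the patch:]

+  <!-- hypothetical reconstruction, not recoverable from the flattened patch -->
+  <li class="sidebar-item">
+    <a href="/docs/gradient_checkpointing.html" class="sidebar-link">
+      <span class="menu-text">Gradient Checkpointing</span>
+    </a>
+  </li>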