diff --git a/.nojekyll b/.nojekyll
index 4a329ae43..6d2ac1d06 100644
--- a/.nojekyll
+++ b/.nojekyll
@@ -1 +1 @@
-5fb2374d
\ No newline at end of file
+f6b4483b
\ No newline at end of file
diff --git a/docs/config-reference.html b/docs/config-reference.html
index 768f8056e..8307e2adc 100644
--- a/docs/config-reference.html
+++ b/docs/config-reference.html
@@ -1157,756 +1157,761 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});
# defaults to `os.cpu_count()` if not set. For Runpod VMs, it will default to number of
# vCPUs via RUNPOD_CPU_COUNT.
dataset_processes: int | None
-# Deduplicates datasets and test_datasets with identical entries
-dataset_exact_deduplication: bool | None
-# Keep dataset in memory while preprocessing. Only needed if cached dataset is taking
-# too much storage
-dataset_keep_in_memory: bool | None
-dataloader_pin_memory: bool | None
-dataloader_num_workers: int | None
-dataloader_prefetch_factor: int | None
-dataloader_drop_last: bool | None
-
-accelerator_config: dict[str, Any] | None
-
-remove_unused_columns: bool | None
-
-# Push prepared dataset to hub - repo_org/repo_name
-push_dataset_to_hub: str | None
-# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private
-# datasets. Required to be true when used in combination with `push_dataset_to_hub`
-hf_use_auth_token: bool | None
-
-device: Any | None
-# Passed through to transformers when loading the model when launched without
-# accelerate. Use `sequential` when training w/ model parallelism to limit memory
-device_map: Any | None
-world_size: int | None
-# Don't mess with this, it's here for accelerate and torchrun
-local_rank: int | None
-ddp: bool | None
-
-# Seed for reproducibility
-seed: int | None
-# Advanced DDP Arguments - timeout
-ddp_timeout: int | None
-# Advanced DDP Arguments - bucket cap in MB
-ddp_bucket_cap_mb: int | None
-# Advanced DDP Arguments - broadcast buffers
-ddp_broadcast_buffers: bool | None
-ddp_find_unused_parameters: bool | None
-
-# Approximate number of predictions sent to wandb depending on batch size. Enabled above
-# 0. Default is 0
-eval_table_size: int | None
-# Total number of tokens generated for predictions sent to wandb. Default is 128
-eval_max_new_tokens: int | None
-# Whether to run causal language model evaluation for metrics in
-# `eval_causal_lm_metrics`
-do_causal_lm_eval: bool | None
-# HF evaluate metrics used during evaluation. Default is ['sacrebleu', 'comet', 'ter',
-# 'chrf', 'perplexity']
-eval_causal_lm_metrics: list[str] | None
-do_bench_eval: bool | None
-bench_dataset: str | None
-bench_split: str | None
-metric_for_best_model: str | None
-greater_is_better: bool | None
-
-# High loss value, indicating the learning has broken down (a good estimate is ~2 times
-# the loss at the start of training)
-loss_watchdog_threshold: float | None
-# Number of high-loss steps in a row before the trainer aborts (default: 3)
-loss_watchdog_patience: int | None
-
-# Run garbage collection every `gc_steps` steps. -1 will run on epoch end and before
-# evaluations. Default is 0 (disabled).
-gc_steps: int | None
-
-# Use CUDA bf16. bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection.
-# require >=ampere
-bf16: Literal['auto'] | bool | None = auto
-# Use CUDA fp16
-fp16: bool | None
-# Enable FP8 mixed precision training using TorchAO. Best used in combination with
-# torch.compile.
-fp8: bool | None
-# Enable FSDP float8 all-gather optimization for FP8 training. Can improve training
-# speed by 10-15% when FSDP is enabled.
-fp8_enable_fsdp_float8_all_gather: bool | None
-# No AMP (automatic mixed precision) - require >=ampere
-bfloat16: bool | None
-# No AMP (automatic mixed precision)
-float16: bool | None
-# Use CUDA tf32 - require >=ampere
-tf32: bool | None
-float32: bool | None
-
-# Whether to use gradient checkpointing. Available options are: true, false, 'offload',
-# 'offload_disk'.
-# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing
-gradient_checkpointing: Literal['offload', 'offload_disk'] | bool | None = False
-# Additional kwargs to pass to the trainer for gradient checkpointing
-gradient_checkpointing_kwargs: dict[str, Any] | None
-# Whether to offload activations. Available options are: true, false, 'legacy', 'disk'.
-activation_offloading: Literal['legacy', 'disk'] | bool | None = False
-
-unfrozen_parameters: list[str] | None
-
-# The maximum length of an input to train with, this should typically be less than 2048
-# as most models have a token/context limit of 2048
-sequence_len: int = 512
-# What to do when a tokenized row exceeds sequence_len. 'drop' removes the row;
-# 'truncate' slices tensors to sequence_len. Defaults to 'drop' for backward
-# compatibility.
-excess_length_strategy: Literal['drop', 'truncate'] | None
-# The maximum length of an input for evaluation. If not specified, defaults to
-# sequence_len
-eval_sequence_len: int | None
-min_sample_len: int | None
-# maximum prompt length for RL training
-max_prompt_len: int | None
-# Use efficient multi-packing with block diagonal attention and per sequence
-# position_ids. Recommend set to 'true'
-sample_packing: bool | None
-# The number of samples packed at a time. Increasing the following values helps with
-# packing, but usually only slightly (<%1.)
-sample_packing_group_size: int | None = 100000
-# The number of samples which can be packed into one sequence. Increase if using a large
-# sequence_len with many short samples.
-sample_packing_bin_size: int | None = 200
-# Whether to pack samples sequentially
-sample_packing_sequentially: bool | None
-# The multiprocessing start method to use for packing. Should be 'fork', 'spawn' or
-# 'forkserver'
-sample_packing_mp_start_method: str | None
-# Set to 'false' if getting errors during eval with sample_packing on
-eval_sample_packing: bool | None
-# Pad inputs so each step uses constant sized buffers. This will reduce memory
-# fragmentation and may prevent OOMs, by re-using memory more efficiently. Defaults to
-# True if `sample_packing` enabled
-pad_to_sequence_len: bool | None
-# Whether to use sequential sampling for curriculum learning
-curriculum_sampling: bool | None
-multipack_real_batches: bool | None
-
-# Use batch flattening for speedups when not using sample_packing
-batch_flattening: Literal['auto'] | bool | None
-
-use_pose: bool | None
-pose_split_on_token_ids: list[int] | None
-pose_max_context_len: int | None
-pose_num_chunks: int | None
+# The maximum number of processes to use while preprocessing your input dataset. This
+# defaults to `os.cpu_count()` if not set. For Runpod VMs, it will default to number of
+# vCPUs via RUNPOD_CPU_COUNT.
+dataset_num_proc: int | None
+
+# Deduplicates datasets and test_datasets with identical entries
+dataset_exact_deduplication: bool | None
+# Keep dataset in memory while preprocessing. Only needed if cached dataset is taking
+# too much storage
+dataset_keep_in_memory: bool | None
+dataloader_pin_memory: bool | None
+dataloader_num_workers: int | None
+dataloader_prefetch_factor: int | None
+dataloader_drop_last: bool | None
+
+accelerator_config: dict[str, Any] | None
+
+remove_unused_columns: bool | None
+
+# Push prepared dataset to hub - repo_org/repo_name
+push_dataset_to_hub: str | None
+# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private
+# datasets. Required to be true when used in combination with `push_dataset_to_hub`
+hf_use_auth_token: bool | None
+
+device: Any | None
+# Passed through to transformers when loading the model when launched without
+# accelerate. Use `sequential` when training w/ model parallelism to limit memory
+device_map: Any | None
+world_size: int | None
+# Don't mess with this, it's here for accelerate and torchrun
+local_rank: int | None
+ddp: bool | None
+
+# Seed for reproducibility
+seed: int | None
+# Advanced DDP Arguments - timeout
+ddp_timeout: int | None
+# Advanced DDP Arguments - bucket cap in MB
+ddp_bucket_cap_mb: int | None
+# Advanced DDP Arguments - broadcast buffers
+ddp_broadcast_buffers: bool | None
+ddp_find_unused_parameters: bool | None
+
+# Approximate number of predictions sent to wandb, depending on batch size. Enabled when
+# set above 0. Default is 0
+eval_table_size: int | None
+# Total number of tokens generated for predictions sent to wandb. Default is 128
+eval_max_new_tokens: int | None
+# Whether to run causal language model evaluation for metrics in
+# `eval_causal_lm_metrics`
+do_causal_lm_eval: bool | None
+# HF evaluate metrics used during evaluation. Default is ['sacrebleu', 'comet', 'ter',
+# 'chrf', 'perplexity']
+eval_causal_lm_metrics: list[str] | None
+do_bench_eval: bool | None
+bench_dataset: str | None
+bench_split: str | None
+metric_for_best_model: str | None
+greater_is_better: bool | None
+
+# High loss value, indicating the learning has broken down (a good estimate is ~2 times
+# the loss at the start of training)
+loss_watchdog_threshold: float | None
+# Number of high-loss steps in a row before the trainer aborts (default: 3)
+loss_watchdog_patience: int | None
+
+# Run garbage collection every `gc_steps` steps. -1 will run on epoch end and before
+# evaluations. Default is 0 (disabled).
+gc_steps: int | None
+
+# Use CUDA bf16. bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection.
+# require >=ampere
+bf16: Literal['auto'] | bool | None = auto
+# Use CUDA fp16
+fp16: bool | None
+# Enable FP8 mixed precision training using TorchAO. Best used in combination with
+# torch.compile.
+fp8: bool | None
+# Enable FSDP float8 all-gather optimization for FP8 training. Can improve training
+# speed by 10-15% when FSDP is enabled.
+fp8_enable_fsdp_float8_all_gather: bool | None
+# No AMP (automatic mixed precision) - require >=ampere
+bfloat16: bool | None
+# No AMP (automatic mixed precision)
+float16: bool | None
+# Use CUDA tf32 - require >=ampere
+tf32: bool | None
+float32: bool | None
+
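+# Illustrative example (not part of the generated schema; values are placeholders): a
+# typical mixed-precision setup on Ampere-or-newer GPUs might look like:
+#   bf16: auto
+#   tf32: true
+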
+# Whether to use gradient checkpointing. Available options are: true, false, 'offload',
+# 'offload_disk'.
+# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing
+gradient_checkpointing: Literal['offload', 'offload_disk'] | bool | None = False
+# Additional kwargs to pass to the trainer for gradient checkpointing
+gradient_checkpointing_kwargs: dict[str, Any] | None
+# Whether to offload activations. Available options are: true, false, 'legacy', 'disk'.
+activation_offloading: Literal['legacy', 'disk'] | bool | None = False
+
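+# Illustrative example (not part of the generated schema): a memory-saving setup might
+# combine gradient checkpointing with a kwarg passed through to the trainer, e.g.:
+#   gradient_checkpointing: true
+#   gradient_checkpointing_kwargs:
+#     use_reentrant: false   # commonly used transformers kwarg; shown here as an example
+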
+unfrozen_parameters: list[str] | None
+
+# The maximum length of an input to train with. This should typically be less than 2048,
+# as most models have a token/context limit of 2048
+sequence_len: int = 512
+# What to do when a tokenized row exceeds sequence_len. 'drop' removes the row;
+# 'truncate' slices tensors to sequence_len. Defaults to 'drop' for backward
+# compatibility.
+excess_length_strategy: Literal['drop', 'truncate'] | None
+# The maximum length of an input for evaluation. If not specified, defaults to
+# sequence_len
+eval_sequence_len: int | None
+min_sample_len: int | None
+# maximum prompt length for RL training
+max_prompt_len: int | None
+# Use efficient multi-packing with block diagonal attention and per sequence
+# position_ids. Recommended to set to 'true'
+sample_packing: bool | None
+# The number of samples packed at a time. Increasing the following values helps with
+# packing, but usually only slightly (<1%).
+sample_packing_group_size: int | None = 100000
+# The number of samples which can be packed into one sequence. Increase if using a large
+# sequence_len with many short samples.
+sample_packing_bin_size: int | None = 200
+# Whether to pack samples sequentially
+sample_packing_sequentially: bool | None
+# The multiprocessing start method to use for packing. Should be 'fork', 'spawn' or
+# 'forkserver'
+sample_packing_mp_start_method: str | None
+# Set to 'false' if getting errors during eval with sample_packing on
+eval_sample_packing: bool | None
+# Pad inputs so each step uses constant sized buffers. This will reduce memory
+# fragmentation and may prevent OOMs, by re-using memory more efficiently. Defaults to
+# True if `sample_packing` enabled
+pad_to_sequence_len: bool | None
+# Whether to use sequential sampling for curriculum learning
+curriculum_sampling: bool | None
+multipack_real_batches: bool | None
+
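+# Illustrative example (not part of the generated schema; values are placeholders): a
+# common packing setup for instruction tuning might look like:
+#   sequence_len: 2048
+#   sample_packing: true
+#   eval_sample_packing: false
+#   pad_to_sequence_len: true
+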
+# Use batch flattening for speedups when not using sample_packing
+batch_flattening: Literal['auto'] | bool | None
-pretrain_multipack_buffer_size: int | None
-# whether to prevent cross attention for packed sequences during pretraining
-pretrain_multipack_attn: bool | None = True
-# whether to concatenate samples during pretraining
-pretraining_sample_concatenation: bool | None
-
-# Use streaming mode for loading datasets
-streaming: bool | None
-# Buffer size for multipack streaming datasets
-streaming_multipack_buffer_size: int | None = 10000
+use_pose: bool | None
+pose_split_on_token_ids: list[int] | None
+pose_max_context_len: int | None
+pose_num_chunks: int | None
+
+pretrain_multipack_buffer_size: int | None
+# whether to prevent cross attention for packed sequences during pretraining
+pretrain_multipack_attn: bool | None = True
+# whether to concatenate samples during pretraining
+pretraining_sample_concatenation: bool | None
-# Whether to use xformers attention patch https://github.com/facebookresearch/xformers
-xformers_attention: bool | None
-# Whether to use scaled-dot-product attention https://pytorch.org/docs/stable/generated/
-# torch.nn.functional.scaled_dot_product_attention.html
-sdp_attention: bool | None
-# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf
-s2_attention: bool | None
-flex_attention: bool | None
-flex_attn_compile_kwargs: dict[str, Any] | None
-# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention
-flash_attention: bool | None
-# Whether to use flash-attention cross entropy implementation - advanced use only
-flash_attn_cross_entropy: bool | None
-# Whether to use flash-attention rms norm implementation - advanced use only
-flash_attn_rms_norm: bool | None
-# Whether to fuse part of the MLP into a single operation
-flash_attn_fuse_mlp: bool | None
-# Whether to use bettertransformers
-flash_optimum: bool | None
-
-eager_attention: bool | None
-
-# Specify a custom attention implementation, used mostly for kernels.
-attn_implementation: str | None
+# Use streaming mode for loading datasets
+streaming: bool | None
+# Buffer size for multipack streaming datasets
+streaming_multipack_buffer_size: int | None = 10000
+
+# Whether to use xformers attention patch https://github.com/facebookresearch/xformers
+xformers_attention: bool | None
+# Whether to use scaled-dot-product attention https://pytorch.org/docs/stable/generated/
+# torch.nn.functional.scaled_dot_product_attention.html
+sdp_attention: bool | None
+# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf
+s2_attention: bool | None
+flex_attention: bool | None
+flex_attn_compile_kwargs: dict[str, Any] | None
+# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention
+flash_attention: bool | None
+# Whether to use flash-attention cross entropy implementation - advanced use only
+flash_attn_cross_entropy: bool | None
+# Whether to use flash-attention rms norm implementation - advanced use only
+flash_attn_rms_norm: bool | None
+# Whether to fuse part of the MLP into a single operation
+flash_attn_fuse_mlp: bool | None
+# Whether to use bettertransformers
+flash_optimum: bool | None
-unsloth_cross_entropy_loss: bool | None
-unsloth_lora_mlp: bool | None
-unsloth_lora_qkv: bool | None
-unsloth_lora_o: bool | None
-unsloth_rms_norm: bool | None
-unsloth_rope: bool | None
-
-# Apply custom LoRA autograd functions and activation function Triton kernels for speed
-# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html
-lora_mlp_kernel: bool | None
-# Apply custom LoRA autograd functions and activation function Triton kernels for speed
-# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html
-lora_qkv_kernel: bool | None
-# Apply custom LoRA autograd functions and activation function Triton kernels for speed
-# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html
-lora_o_kernel: bool | None
-
-# Whether to use chunked cross entropy loss for memory efficiency
-chunked_cross_entropy: bool | None
-# Number of chunks to use for chunked cross entropy loss
-chunked_cross_entropy_num_chunks: int | None
+eager_attention: bool | None
+
+# Specify a custom attention implementation, used mostly for kernels.
+attn_implementation: str | None
+
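+# Illustrative example (not part of the generated schema): typically only one attention
+# backend is enabled at a time, e.g.:
+#   flash_attention: true
+# or, on hardware without flash-attention support:
+#   sdp_attention: true
+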
+unsloth_cross_entropy_loss: bool | None
+unsloth_lora_mlp: bool | None
+unsloth_lora_qkv: bool | None
+unsloth_lora_o: bool | None
+unsloth_rms_norm: bool | None
+unsloth_rope: bool | None
+
+# Apply custom LoRA autograd functions and activation function Triton kernels for speed
+# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html
+lora_mlp_kernel: bool | None
+# Apply custom LoRA autograd functions and activation function Triton kernels for speed
+# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html
+lora_qkv_kernel: bool | None
+# Apply custom LoRA autograd functions and activation function Triton kernels for speed
+# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html
+lora_o_kernel: bool | None
-# Whether to use ALST tiled mlp for memory efficient long context
-tiled_mlp: bool | None
-
-# Number of shards to use for ALST tiled mlp. If unset, it will be set based on
-# seqlen/hidden_size
-tiled_mlp_num_shards: int | None
-
-# Whether to use original mlp for ALST tiled mlp. Otherwise uses a generic MLP based on
-# llama.
-tiled_mlp_use_original_mlp: bool | None = True
-
-llama4_linearized_experts: bool | None
-
-# Deepspeed config path. e.g., deepspeed_configs/zero3.json
-deepspeed: str | dict[str, Any] | None
-# Whether to use deepcompile for faster training with deepspeed
-deepcompile: bool | None
-# FSDP configuration
-fsdp: list[str] | None
-
-# FSDP configuration options
-fsdp_config: FSDPConfig | None
- # For FSDPConfig:
- # Enable activation checkpointing to reduce memory usage during forward passes
-activation_checkpointing: bool | None
- # Offload parameters to CPU to reduce GPU memory usage
-offload_params: bool | None
- # Synchronize module states across all processes
-sync_module_states: bool | None
- # Enable CPU RAM efficient loading to reduce memory usage during model loading
-cpu_ram_efficient_loading: bool | None
- # Disabling this enables swap memory usage for resource-constrained setups when
- # offload_params is enabled.
-cpu_offload_pin_memory: bool | None
- # Use original parameters instead of flattened parameters
-use_orig_params: bool | None
-
- # Type of state dict to use for saving/loading checkpoints
-state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None
- # Final state dict type to use after training completion
-final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None
+# Whether to use chunked cross entropy loss for memory efficiency
+chunked_cross_entropy: bool | None
+# Number of chunks to use for chunked cross entropy loss
+chunked_cross_entropy_num_chunks: int | None
+
+# Whether to use ALST tiled mlp for memory efficient long context
+tiled_mlp: bool | None
+
+# Number of shards to use for ALST tiled mlp. If unset, it will be set based on
+# seqlen/hidden_size
+tiled_mlp_num_shards: int | None
+
+# Whether to use original mlp for ALST tiled mlp. Otherwise uses a generic MLP based on
+# llama.
+tiled_mlp_use_original_mlp: bool | None = True
+
+llama4_linearized_experts: bool | None
+
+# Deepspeed config path. e.g., deepspeed_configs/zero3.json
+deepspeed: str | dict[str, Any] | None
+# Whether to use deepcompile for faster training with deepspeed
+deepcompile: bool | None
+# FSDP configuration
+fsdp: list[str] | None
+
+# FSDP configuration options
+fsdp_config: FSDPConfig | None
+ # For FSDPConfig:
+ # Enable activation checkpointing to reduce memory usage during forward passes
+activation_checkpointing: bool | None
+ # Offload parameters to CPU to reduce GPU memory usage
+offload_params: bool | None
+ # Synchronize module states across all processes
+sync_module_states: bool | None
+ # Enable CPU RAM efficient loading to reduce memory usage during model loading
+cpu_ram_efficient_loading: bool | None
+ # Disabling this enables swap memory usage for resource-constrained setups when
+ # offload_params is enabled.
+cpu_offload_pin_memory: bool | None
+ # Use original parameters instead of flattened parameters
+use_orig_params: bool | None
- # Policy for automatically wrapping modules with FSDP
-auto_wrap_policy: Literal['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP'] | None
- # Class name of transformer layers to wrap (e.g., 'LlamaDecoderLayer')
-transformer_layer_cls_to_wrap: str | None
+ # Type of state dict to use for saving/loading checkpoints
+state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None
+ # Final state dict type to use after training completion
+final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None
- # Reshard parameters after forward pass to save memory
-reshard_after_forward: bool | None
- # Mixed precision policy for FSDP (e.g., 'fp16', 'bf16')
-mixed_precision_policy: str | None
+ # Policy for automatically wrapping modules with FSDP
+auto_wrap_policy: Literal['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP'] | None
+ # Class name of transformer layers to wrap (e.g., 'LlamaDecoderLayer')
+transformer_layer_cls_to_wrap: str | None
-# FSDP version
-fsdp_version: int | None
-fsdp_final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None
-
-# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for
-# no eval.
-val_set_size: float | None = 0.0
-
-# Number of devices to shard across. If not set, will use all available devices.
-dp_shard_size: int | None
-# Number of devices to replicate across.
-dp_replicate_size: int | None
-# Deprecated: use `context_parallel_size` instead
-sequence_parallel_degree: int | None
-# Set to a divisor of the number of GPUs available to split sequences into chunks of
-# equal size. Use in long context training to prevent OOM when sequences cannot fit into
-# a single GPU's VRAM. E.g., if 4 GPUs are available, set this value to 2 to split each
-# sequence into two equal-sized subsequences, or set to 4 to split into four equal-sized
-# subsequences. See https://docs.axolotl.ai/docs/sequence_parallelism.html for more
-# details.
-context_parallel_size: int | None
-# Optional; strides across the key dimension. Larger values use more memory but should
-# make training faster. Must evenly divide the number of KV heads in your model.
-heads_k_stride: int | None
-# One of 'varlen_llama3', 'batch_ring', 'batch_zigzag', 'batch_stripe'. Defaults to
-# 'varlen_llama3' in the sample packing case, and 'batch_ring' in the non-sample packing
-# case.
-ring_attn_func: RingAttnFunc | None
-# Number of tensor parallel processes in TP group. Only supported with DeepSpeed AutoTP.
-tensor_parallel_size: int | None
-
-# Add or change special tokens. If you add tokens here, you don't need to add them to
-# the `tokens` list.
-special_tokens: SpecialTokensConfig | None
- # For SpecialTokensConfig:
-bos_token: str | None
-eos_token: str | None
-pad_token: str | None
-unk_token: str | None
-additional_special_tokens: list[str] | None
-
-# Add extra tokens to the tokenizer
-tokens: list[str] | None
-# Mapping token_id to new_token_string to override reserved added_tokens in the
-# tokenizer. Only works for tokens that are not part of the base vocab (aka are
-# added_tokens). Can be checked if they exist in tokenizer.json added_tokens.
-added_tokens_overrides: dict[int, str] | None
-
-# Whether to use torch.compile and which backend to use. setting to `auto` will enable
-# torch compile when torch>=2.6.0
-torch_compile: Literal['auto'] | bool | None
-# Backend to use for torch.compile
-torch_compile_backend: str | None
-torch_compile_mode: Literal['default', 'reduce-overhead', 'max-autotune'] | None
-
-# Maximum number of iterations to train for. It precedes num_epochs which means that if
-# both are set, num_epochs will not be guaranteed. e.g., when 1 epoch is 1000 steps =>
-# `num_epochs: 2` and `max_steps: 100` will train for 100 steps
-max_steps: int | None
-# Number of warmup steps. Cannot use with warmup_ratio
-warmup_steps: int | None
-# Warmup ratio. Cannot use with warmup_steps
-warmup_ratio: float | None
-# Leave empty to eval at each epoch, integer for every N steps. float for fraction of
-# total steps
-eval_steps: int | float | None
-# Number of times per epoch to run evals, mutually exclusive with eval_steps
-evals_per_epoch: int | None
-# Set to `no` to skip evaluation, `epoch` at end of each epoch, leave empty to infer
-# from `eval_steps`
-eval_strategy: str | None
-
-# Leave empty to save at each epoch, integer for every N steps. float for fraction of
-# total steps
-save_steps: int | float | None
-# Number of times per epoch to save a checkpoint, mutually exclusive with save_steps
-saves_per_epoch: int | None
-# Set to `no` to skip checkpoint saves, `epoch` at end of each epoch, `best` when better
-# result is achieved, leave empty to infer from `save_steps`
-save_strategy: str | None
-# Checkpoints saved at a time
-save_total_limit: int | None
-# Whether to checkpoint a model after the first step of training. Defaults to False.
-save_first_step: bool | None
-
-# Logging frequency
-logging_steps: int | None
-# Stop training after this many evaluation losses have increased in a row. https://huggi
-# ngface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppin
-# gCallback
-early_stopping_patience: int | None
-load_best_model_at_end: bool | None = False
-# Save only the model weights, skipping the optimizer. Using this means you can't resume
-# from checkpoints.
-save_only_model: bool | None = False
-# Use tensorboard for logging
-use_tensorboard: bool | None
-# Enable the pytorch profiler to capture the first N steps of training to the
-# output_dir. see https://pytorch.org/blog/understanding-gpu-memory-1/ for more
-# information. Snapshots can be visualized @ https://pytorch.org/memory_viz
-profiler_steps: int | None
-# Which step to start the profiler at. Useful for only capturing a few steps mid-run.
-profiler_steps_start: int | None = 0
-# bool of whether to report tokens per second at the end of training. This is not
-# supported with pre-training datasets.
-include_tokens_per_second: bool | None
-# bool of whether to report tokens per second per-gpu during training by measuring
-# throughput of non-padding tokens.
-include_tkps: bool | None = True
-# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to
-# add noise to embeddings. Currently only supported on Llama and Mistral
-neftune_noise_alpha: float | None
-
-# Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to
-# `beta` in `ORPOConfig` due to trl mapping.
-orpo_alpha: float | None
-# Weighting of NLL term in loss from RPO paper
-rpo_alpha: float | None
-# Target reward margin for the SimPO loss
-simpo_gamma: float | None
-# Weight of the BC regularizer
-cpo_alpha: float | None
-
-# Factor for desirable loss term in KTO loss
-kto_desirable_weight: float | None
-# Factor for undesirable loss term in KTO loss
-kto_undesirable_weight: float | None
-# The beta parameter for the RL training
-rl_beta: float | None
-
-# Defines the max memory usage per gpu on the system. Passed through to transformers
-# when loading the model.
-max_memory: dict[int | Literal['cpu', 'disk'], int | str] | None
-# Limit the memory for all available GPUs to this amount (if an integer, expressed in
-# gigabytes); default: unset
-gpu_memory_limit: int | str | None
-# Whether to use low_cpu_mem_usage
-low_cpu_mem_usage: bool | None
-
-# The name of the chat template to use for training, following values are supported:
-# tokenizer_default: Uses the chat template that is available in the
-# tokenizer_config.json. If the chat template is not available in the tokenizer, it will
-# raise an error. This is the default value.
-# alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates
-# are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.
-# tokenizer_default_fallback_*: where * is the name of the chat template to fallback to.
-# E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not
-# available in the tokenizer. jinja: Uses a custom jinja template for the chat template.
-# The custom jinja template should be provided in the chat_template_jinja field. The
-# selected chat template will be saved to the tokenizer_config.json for easier
-# inferencing
-chat_template: ChatTemplate | Annotated[str, StringConstraints(pattern='^tokenizer_default_fallback_')] | None
-# Custom jinja template or path to jinja file for chat template. This will be only used
-# if chat_template is set to `jinja` or `null` (in which case chat_template is
-# automatically set to `jinja`). Default is null.
-chat_template_jinja: str | None
-# Additional kwargs to pass to the chat template. This is useful for customizing the
-# chat template. For example, you can pass `thinking=False` to add a generation prompt
-# to the chat template.
-chat_template_kwargs: dict[str, Any] | None
-# Custom EOT (End-of-Turn) tokens to mask/unmask during training. These tokens mark the
-# boundaries between conversation turns. For example: ['/INST', '</s>',
-# '[/SYSTEM_PROMPT]']. If not specified, defaults to just the model's eos_token. This is
-# useful for templates that use multiple delimiter tokens.
-eot_tokens: list[str] | None
-# Changes the default system message. Currently only supports chatml.
-default_system_message: str | None
-
-# Token index or indices to adjust embedding weights to the mean of the other tokens.
-# This is useful when the model has untrained embeddings.
-fix_untrained_tokens: int | list[int] | None
-
-is_preprocess: bool | None
-preprocess_iterable: bool | None
-
-# Total number of tokens - internal use
-total_num_tokens: int | None
-total_supervised_tokens: int | None
-# You can set these packing optimizations AFTER starting a training at least once. The
-# trainer will provide recommended values for these values.
-sample_packing_eff_est: float | None
-axolotl_config_path: str | None
-
-# Internal use only - Used to identify which the model is based on
-is_falcon_derived_model: bool | None
-# Internal use only - Used to identify which the model is based on
-is_llama_derived_model: bool | None
-# Internal use only - Used to identify which the model is based on. Please note that if
-# you set this to true, `padding_side` will be set to 'left' by default
-is_mistral_derived_model: bool | None
+ # Reshard parameters after forward pass to save memory
+reshard_after_forward: bool | None
+ # Mixed precision policy for FSDP (e.g., 'fp16', 'bf16')
+mixed_precision_policy: str | None
+
+# FSDP version
+fsdp_version: int | None
+fsdp_final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None
+
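+# Illustrative example (not part of the generated schema; the layer class name is
+# model-specific): an FSDP setup for a Llama-style model might look like:
+#   fsdp_version: 2
+#   fsdp_config:
+#     auto_wrap_policy: TRANSFORMER_BASED_WRAP
+#     transformer_layer_cls_to_wrap: LlamaDecoderLayer
+#     state_dict_type: FULL_STATE_DICT
+#     reshard_after_forward: true
+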
+# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for
+# no eval.
+val_set_size: float | None = 0.0
+
+# Number of devices to shard across. If not set, will use all available devices.
+dp_shard_size: int | None
+# Number of devices to replicate across.
+dp_replicate_size: int | None
+# Deprecated: use `context_parallel_size` instead
+sequence_parallel_degree: int | None
+# Set to a divisor of the number of GPUs available to split sequences into chunks of
+# equal size. Use in long context training to prevent OOM when sequences cannot fit into
+# a single GPU's VRAM. E.g., if 4 GPUs are available, set this value to 2 to split each
+# sequence into two equal-sized subsequences, or set to 4 to split into four equal-sized
+# subsequences. See https://docs.axolotl.ai/docs/sequence_parallelism.html for more
+# details.
+context_parallel_size: int | None
+# Optional; strides across the key dimension. Larger values use more memory but should
+# make training faster. Must evenly divide the number of KV heads in your model.
+heads_k_stride: int | None
+# One of 'varlen_llama3', 'batch_ring', 'batch_zigzag', 'batch_stripe'. Defaults to
+# 'varlen_llama3' in the sample packing case, and 'batch_ring' in the non-sample packing
+# case.
+ring_attn_func: RingAttnFunc | None
+# Number of tensor parallel processes in TP group. Only supported with DeepSpeed AutoTP.
+tensor_parallel_size: int | None
+
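+# Illustrative example (not part of the generated schema): with 4 GPUs, splitting each
+# sequence into two equal chunks for long-context training might look like:
+#   context_parallel_size: 2
+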
+# Add or change special tokens. If you add tokens here, you don't need to add them to
+# the `tokens` list.
+special_tokens: SpecialTokensConfig | None
+ # For SpecialTokensConfig:
+bos_token: str | None
+eos_token: str | None
+pad_token: str | None
+unk_token: str | None
+additional_special_tokens: list[str] | None
+
+# Add extra tokens to the tokenizer
+tokens: list[str] | None
+# Mapping token_id to new_token_string to override reserved added_tokens in the
+# tokenizer. Only works for tokens that are not part of the base vocab (aka are
+# added_tokens). You can check whether they exist under added_tokens in tokenizer.json.
+added_tokens_overrides: dict[int, str] | None
+
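+# Illustrative example (not part of the generated schema; token strings are
+# hypothetical): adding a pad token and extra tokens might look like:
+#   special_tokens:
+#     pad_token: "<pad>"
+#   tokens:
+#     - "<|im_start|>"
+#     - "<|im_end|>"
+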
+# Whether to use torch.compile and which backend to use. Setting to `auto` will enable
+# torch.compile when torch>=2.6.0
+torch_compile: Literal['auto'] | bool | None
+# Backend to use for torch.compile
+torch_compile_backend: str | None
+torch_compile_mode: Literal['default', 'reduce-overhead', 'max-autotune'] | None
+
+# Maximum number of iterations to train for. It precedes num_epochs which means that if
+# both are set, num_epochs will not be guaranteed. e.g., when 1 epoch is 1000 steps =>
+# `num_epochs: 2` and `max_steps: 100` will train for 100 steps
+max_steps: int | None
+# Number of warmup steps. Cannot use with warmup_ratio
+warmup_steps: int | None
+# Warmup ratio. Cannot use with warmup_steps
+warmup_ratio: float | None
+# Leave empty to eval at each epoch, integer for every N steps. float for fraction of
+# total steps
+eval_steps: int | float | None
+# Number of times per epoch to run evals, mutually exclusive with eval_steps
+evals_per_epoch: int | None
+# Set to `no` to skip evaluation, `epoch` at end of each epoch, leave empty to infer
+# from `eval_steps`
+eval_strategy: str | None
+
+# Leave empty to save at each epoch, integer for every N steps. float for fraction of
+# total steps
+save_steps: int | float | None
+# Number of times per epoch to save a checkpoint, mutually exclusive with save_steps
+saves_per_epoch: int | None
+# Set to `no` to skip checkpoint saves, `epoch` at end of each epoch, `best` when better
+# result is achieved, leave empty to infer from `save_steps`
+save_strategy: str | None
+# Maximum number of checkpoints kept at a time
+save_total_limit: int | None
+# Whether to checkpoint a model after the first step of training. Defaults to False.
+save_first_step: bool | None
+
+# Logging frequency
+logging_steps: int | None
+# Stop training after this many evaluation losses have increased in a row.
+# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback
+early_stopping_patience: int | None
+load_best_model_at_end: bool | None = False
+# Save only the model weights, skipping the optimizer. Using this means you can't resume
+# from checkpoints.
+save_only_model: bool | None = False
+# Use tensorboard for logging
+use_tensorboard: bool | None
+# Enable the pytorch profiler to capture the first N steps of training to the
+# output_dir. see https://pytorch.org/blog/understanding-gpu-memory-1/ for more
+# information. Snapshots can be visualized @ https://pytorch.org/memory_viz
+profiler_steps: int | None
+# Which step to start the profiler at. Useful for only capturing a few steps mid-run.
+profiler_steps_start: int | None = 0
+# Whether to report tokens per second at the end of training. This is not
+# supported with pre-training datasets.
+include_tokens_per_second: bool | None
+# Whether to report tokens per second per GPU during training, measured as the
+# throughput of non-padding tokens.
+include_tkps: bool | None = True
+# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to
+# add noise to embeddings. Currently only supported on Llama and Mistral
+neftune_noise_alpha: float | None
+
+# Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to
+# `beta` in `ORPOConfig` due to trl mapping.
+orpo_alpha: float | None
+# Weighting of NLL term in loss from RPO paper
+rpo_alpha: float | None
+# Target reward margin for the SimPO loss
+simpo_gamma: float | None
+# Weight of the BC regularizer
+cpo_alpha: float | None
+
+# Factor for desirable loss term in KTO loss
+kto_desirable_weight: float | None
+# Factor for undesirable loss term in KTO loss
+kto_undesirable_weight: float | None
+# The beta parameter for the RL training
+rl_beta: float | None
+
+# Defines the max memory usage per gpu on the system. Passed through to transformers
+# when loading the model.
+max_memory: dict[int | Literal['cpu', 'disk'], int | str] | None
+# Limit the memory for all available GPUs to this amount (if an integer, expressed in
+# gigabytes); default: unset
+gpu_memory_limit: int | str | None
+# Whether to use low_cpu_mem_usage
+low_cpu_mem_usage: bool | None
+
+# The name of the chat template to use for training, following values are supported:
+# tokenizer_default: Uses the chat template that is available in the
+# tokenizer_config.json. If the chat template is not available in the tokenizer, it will
+# raise an error. This is the default value.
+# alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates
+# are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.
+# tokenizer_default_fallback_*: where * is the name of the chat template to fallback to.
+# E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not
+# available in the tokenizer. jinja: Uses a custom jinja template for the chat template.
+# The custom jinja template should be provided in the chat_template_jinja field. The
+# selected chat template will be saved to the tokenizer_config.json for easier
+# inferencing
+chat_template: ChatTemplate | Annotated[str, StringConstraints(pattern='^tokenizer_default_fallback_')] | None
+# Custom jinja template or path to jinja file for chat template. This will only be used
+# if chat_template is set to `jinja` or `null` (in which case chat_template is
+# automatically set to `jinja`). Default is null.
+chat_template_jinja: str | None
+# Additional kwargs to pass to the chat template. This is useful for customizing the
+# chat template. For example, you can pass `thinking=False` to add a generation prompt
+# to the chat template.
+chat_template_kwargs: dict[str, Any] | None
+# Custom EOT (End-of-Turn) tokens to mask/unmask during training. These tokens mark the
+# boundaries between conversation turns. For example: ['/INST', '</s>',
+# '[/SYSTEM_PROMPT]']. If not specified, defaults to just the model's eos_token. This is
+# useful for templates that use multiple delimiter tokens.
+eot_tokens: list[str] | None
+# Changes the default system message. Currently only supports chatml.
+default_system_message: str | None
+
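+# Illustrative example (not part of the generated schema): using the tokenizer's own
+# template with a chatml fallback, or a custom jinja file, might look like:
+#   chat_template: tokenizer_default_fallback_chatml
+# or
+#   chat_template: jinja
+#   chat_template_jinja: ./my_template.jinja   # hypothetical path
+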
+# Token index or indices to adjust embedding weights to the mean of the other tokens.
+# This is useful when the model has untrained embeddings.
+fix_untrained_tokens: int | list[int] | None
+
+is_preprocess: bool | None
+preprocess_iterable: bool | None
+
+# Total number of tokens - internal use
+total_num_tokens: int | None
+total_supervised_tokens: int | None
+# You can set these packing optimizations AFTER starting a training run at least once.
+# The trainer will provide recommended values for them.
+sample_packing_eff_est: float | None
+axolotl_config_path: str | None
+
+# Internal use only - Used to identify what the model is based on
+is_falcon_derived_model: bool | None
+# Internal use only - Used to identify what the model is based on
-is_qwen_derived_model: bool | None
-
-# Add plugins to extend the pipeline. See `src/axolotl/integrations` for the available
-# plugins or doc below for more details.
-# https://docs.axolotl.ai/docs/custom_integrations.html
-plugins: list[str] | None
+is_llama_derived_model: bool | None
+# Internal use only - Used to identify what the model is based on. Please note that if
+# you set this to true, `padding_side` will be set to 'left' by default
+is_mistral_derived_model: bool | None
+# Internal use only - Used to identify what the model is based on
+is_qwen_derived_model: bool | None
-# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files. This
-# can also be a relative path to a model on disk
-base_model: str (required)
-# If the base_model repo on hf hub doesn't include configuration .json files, You can
-# set that here, or leave this empty to default to base_model
-base_model_config: str | None
-cls_model_config: str | None
-# Optional tokenizer configuration path in case you want to use a different tokenizer
-# than the one defined in the base model
-tokenizer_config: str | None
-# use_fast option for tokenizer loading from_pretrained, default to True
-tokenizer_use_fast: bool | None
-# Whether to use the legacy tokenizer setting, defaults to True
-tokenizer_legacy: bool | None
-# Whether to use mistral-common tokenizer. If set to True, it will use the mistral-
-# common tokenizer.
-tokenizer_use_mistral_common: bool | None
-# Corresponding tokenizer for the model AutoTokenizer is a good choice
-tokenizer_type: str | None
-# transformers processor class
-processor_type: str | None
-# Whether to save jinja files for tokenizer, transformers default is True
-tokenizer_save_jinja_files: bool | None = True
-# Trust remote code for untrusted source
-trust_remote_code: bool | None
-
-# Don't move the model to the device before sharding. Set to `false` to revert to legacy
-# behavior.
-experimental_skip_move_to_device: bool | None = True
-
-# Use custom kernels, e.g. MegaBlocks.
-use_kernels: bool | None
-
-# Model loading quantization config
-model_quantization_config: Literal['Mxfp4Config'] | None
-# kwargs for model quantization config
-model_quantization_config_kwargs: dict[str, Any] | None
+# Add plugins to extend the pipeline. See `src/axolotl/integrations` for the available
+# plugins or doc below for more details.
+# https://docs.axolotl.ai/docs/custom_integrations.html
+plugins: list[str] | None
+
+# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files. This
+# can also be a relative path to a model on disk
+base_model: str (required)
+# If the base_model repo on hf hub doesn't include configuration .json files, you can
+# set that here, or leave this empty to default to base_model
+base_model_config: str | None
+cls_model_config: str | None
+# Optional tokenizer configuration path in case you want to use a different tokenizer
+# than the one defined in the base model
+tokenizer_config: str | None
+# use_fast option for tokenizer loading from_pretrained, defaults to True
+tokenizer_use_fast: bool | None
+# Whether to use the legacy tokenizer setting, defaults to True
+tokenizer_legacy: bool | None
+# Whether to use mistral-common tokenizer. If set to True, it will use the mistral-
+# common tokenizer.
+tokenizer_use_mistral_common: bool | None
+# Corresponding tokenizer for the model. AutoTokenizer is a good choice
+tokenizer_type: str | None
+# transformers processor class
+processor_type: str | None
+# Whether to save jinja files for tokenizer, transformers default is True
+tokenizer_save_jinja_files: bool | None = True
+# Trust remote code for untrusted source
+trust_remote_code: bool | None
+
+# Don't move the model to the device before sharding. Set to `false` to revert to legacy
+# behavior.
+experimental_skip_move_to_device: bool | None = True
+
+# Use custom kernels, e.g. MegaBlocks.
+use_kernels: bool | None
-# Where to save the full-finetuned model to
-output_dir: str = ./model-out
-# push checkpoints to hub
-hub_model_id: str | None
-# how to push checkpoints to hub
-hub_strategy: str | None
-# Save model as safetensors (require safetensors package). Default True
-save_safetensors: bool | None = True
-
-# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer
-load_in_8bit: bool | None = False
-# Use bitsandbytes 4 bit
-load_in_4bit: bool | None = False
+# Model loading quantization config
+model_quantization_config: Literal['Mxfp4Config'] | None
+# kwargs for model quantization config
+model_quantization_config_kwargs: dict[str, Any] | None
+
+# Where to save the full-finetuned model to
+output_dir: str = ./model-out
+# push checkpoints to hub
+hub_model_id: str | None
+# how to push checkpoints to hub
+hub_strategy: str | None
+# Save model as safetensors (requires safetensors package). Default True
+save_safetensors: bool | None = True
-# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in
-# original model
-adapter: str | None
-# If you already have a lora model trained that you want to load, put that here. This
-# means after training, if you want to test the model, you should set this to the value
-# of `output_dir`. Note that if you merge an adapter to the base model, a new
-# subdirectory `merged` will be created under the `output_dir`.
-lora_model_dir: str | None
-lora_r: int | None
-lora_alpha: int | None
-lora_fan_in_fan_out: bool | None
-lora_target_modules: str | list[str] | None
-lora_target_parameters: str | list[str] | None
-# If true, will target all linear modules
-lora_target_linear: bool | None
-# If you added new tokens to the tokenizer, you may need to save some LoRA modules
-# because they need to know the new tokens. For LLaMA and Mistral, you need to save
-# `embed_tokens` and `lm_head`. It may vary for other models. `embed_tokens` converts
-# tokens to embeddings, and `lm_head` converts embeddings to token probabilities.
-lora_modules_to_save: list[str] | None
-lora_dropout: float | None = 0.0
-# The layer indices to transform, otherwise, apply to all layers
-peft_layers_to_transform: list[int] | None
-peft_layers_pattern: list[str] | None
-
-peft: PeftConfig | None
- # For PeftConfig:
- # Configuration options for loftq initialization for LoRA
-loftq_config: LoftQConfig | None
- # For LoftQConfig:
- # typically 4 bits
-loftq_bits: int = 4
-
-# Whether to use DoRA.
-peft_use_dora: bool | None
-# Whether to use RSLoRA.
-peft_use_rslora: bool | None
-# List of layer indices to replicate.
-peft_layer_replication: list[tuple[int, int]] | None
-# How to initialize LoRA weights. Default to True which is MS original implementation.
-peft_init_lora_weights: bool | str | None
-# A list of token indices to fine-tune on the `embed_tokens` layer. Otherwise, a dict
-# mapping an embedding layer name to its trainable token indices. See
-# https://huggingface.co/docs/peft/v0.17.0/en/developer_guides/lora#efficiently-train-
-# tokens-alongside-lora
-peft_trainable_token_indices: list[int] | dict[str, list[int]] | None
-
-# load qlora model in sharded format for FSDP using answer.ai technique.
-qlora_sharded_model_loading: bool | None = False
-# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it
-# takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge
-lora_on_cpu: bool | None
-# Whether you are training a 4-bit GPTQ quantized model
-gptq: bool | None
-# optional overrides to the bnb 4bit quantization configuration
-bnb_config_kwargs: dict[str, Any] | None
-
-# loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.
-loraplus_lr_ratio: float | None
-# loraplus learning rate for lora embedding layers. Default value is 1e-6.
-loraplus_lr_embedding: float | None = 1e-06
+# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer
+load_in_8bit: bool | None = False
+# Use bitsandbytes 4 bit
+load_in_4bit: bool | None = False
+
+# Set to 'lora' or 'qlora' to use an adapter, or leave blank to train all parameters of
+# the original model
+adapter: str | None
+# If you already have a lora model trained that you want to load, put that here. This
+# means after training, if you want to test the model, you should set this to the value
+# of `output_dir`. Note that if you merge an adapter to the base model, a new
+# subdirectory `merged` will be created under the `output_dir`.
+lora_model_dir: str | None
+lora_r: int | None
+lora_alpha: int | None
+lora_fan_in_fan_out: bool | None
+lora_target_modules: str | list[str] | None
+lora_target_parameters: str | list[str] | None
+# If true, will target all linear modules
+lora_target_linear: bool | None
+# If you added new tokens to the tokenizer, you may need to save some LoRA modules
+# because they need to know the new tokens. For LLaMA and Mistral, you need to save
+# `embed_tokens` and `lm_head`. It may vary for other models. `embed_tokens` converts
+# tokens to embeddings, and `lm_head` converts embeddings to token probabilities.
+lora_modules_to_save: list[str] | None
+lora_dropout: float | None = 0.0
+# The layer indices to transform, otherwise, apply to all layers
+peft_layers_to_transform: list[int] | None
+peft_layers_pattern: list[str] | None
+
+peft: PeftConfig | None
+ # For PeftConfig:
+ # Configuration options for loftq initialization for LoRA
+loftq_config: LoftQConfig | None
+ # For LoftQConfig:
+ # typically 4 bits
+loftq_bits: int = 4
+
+# Whether to use DoRA.
+peft_use_dora: bool | None
+# Whether to use RSLoRA.
+peft_use_rslora: bool | None
+# List of layer indices to replicate.
+peft_layer_replication: list[tuple[int, int]] | None
+# How to initialize LoRA weights. Defaults to True, which is the Microsoft original
+# implementation.
+peft_init_lora_weights: bool | str | None
+# A list of token indices to fine-tune on the `embed_tokens` layer. Otherwise, a dict
+# mapping an embedding layer name to its trainable token indices. See
+# https://huggingface.co/docs/peft/v0.17.0/en/developer_guides/lora#efficiently-train-
+# tokens-alongside-lora
+peft_trainable_token_indices: list[int] | dict[str, list[int]] | None
+
+# load qlora model in sharded format for FSDP using answer.ai technique.
+qlora_sharded_model_loading: bool | None = False
+# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it
+# takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge
+lora_on_cpu: bool | None
+# Whether you are training a 4-bit GPTQ quantized model
+gptq: bool | None
+# optional overrides to the bnb 4bit quantization configuration
+bnb_config_kwargs: dict[str, Any] | None
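+
+# Illustrative example (not part of the generated schema; values are typical
+# placeholders): a QLoRA setup might look like:
+#   adapter: qlora
+#   load_in_4bit: true
+#   lora_r: 16
+#   lora_alpha: 32
+#   lora_dropout: 0.05
+#   lora_target_linear: true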
-merge_lora: bool | None
-
-# Whether to use ReLoRA. Use with jagged_restart_*steps options.
-relora: bool | None
-# threshold for optimizer magnitude when pruning
-relora_prune_ratio: float | None
-# True to perform lora weight merges on cpu during restarts, for modest gpu memory
-# savings
-relora_cpu_offload: bool | None
-
-# how often to reset for jagged restarts
-jagged_restart_steps: int | None
-# how many warmup steps to take after reset for jagged restarts
-jagged_restart_warmup_steps: int | None
-# how many anneal steps to take before reset for jagged restarts
-jagged_restart_anneal_steps: int | None
-
-# If greater than 1, backpropagation will be skipped and the gradients will be
-# accumulated for the given number of steps.
-gradient_accumulation_steps: int | None = 1
-# The number of samples to include in each batch. This is the number of samples sent to
-# each GPU. Batch size per gpu = micro_batch_size * gradient_accumulation_steps
-micro_batch_size: int | None = 1
-# Total batch size, we do not recommended setting this manually
-batch_size: int | None
-# per gpu micro batch size for evals, defaults to value of micro_batch_size
-eval_batch_size: int | None
-
-# whether to find batch size that fits in memory. Passed to underlying transformers
-# Trainer
-auto_find_batch_size: bool | None
-
-# Whether to mask out or include the human's prompt from the training labels
-train_on_inputs: bool | None = False
-# Group similarly sized data to minimize padding. May be slower to start, as it must
-# download and sort the entire dataset. Note that training loss may have an oscillating
-# pattern with this enabled.
-group_by_length: bool | None
-
-learning_rate: str | float (required)
-embedding_lr: float | None
-embedding_lr_scale: float | None
-# Specify weight decay
-weight_decay: float | None = 0.0
-# Specify optimizer
-optimizer: OptimizerNames | CustomSupportedOptimizers | None = OptimizerNames.ADAMW_TORCH_FUSED
-# Dictionary of arguments to pass to the optimizer
-optim_args: str | dict[str, Any] | None
-# The target modules to optimize, i.e. the module names that you would like to train,
-# right now this is used only for GaLore algorithm
-optim_target_modules: list[str] | Literal['all_linear'] | None
-# Path to torch distx for optim 'adamw_anyprecision'
-torchdistx_path: str | None
-lr_scheduler: SchedulerType | Literal['one_cycle'] | Literal['rex'] | None = SchedulerType.COSINE
-# Specify a scheduler and kwargs to use with the optimizer
-lr_scheduler_kwargs: dict[str, Any] | None
-lr_quadratic_warmup: bool | None
-# decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of
-# peak lr
-cosine_min_lr_ratio: float | None
-# freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means
-# start cosine_min_lr at 80% of training step
-cosine_constant_lr_ratio: float | None
-# Learning rate div factor
-lr_div_factor: float | None
-
-lr_groups: list[LrGroup] | None
- # For LrGroup:
-name: str (required)
-modules: list[str] (required)
-lr: float (required)
-
-# adamw hyperparams
-adam_epsilon: float | None
-# only used for CAME Optimizer
-adam_epsilon2: float | None
-# adamw hyperparams
-adam_beta1: float | None
-# adamw hyperparams
-adam_beta2: float | None
-# only used for CAME Optimizer
-adam_beta3: float | None
-
-# Dion Optimizer learning rate
-dion_lr: float | None
-# Dion Optimizer momentum
-dion_momentum: float | None
-# Dion Optimizer: r/d fraction for low-rank approximation. Used to compute the low-rank
-# dimension.
-dion_rank_fraction: float | None = 1.0
-# Dion Optimizer: Round up the low-rank dimension to a multiple of this number. This may
-# be useful to ensure even sharding.
-dion_rank_multiple_of: int | None = 1
-
-# Gradient clipping max norm
-max_grad_norm: float | None
-num_epochs: float = 1.0
-
-use_wandb: bool | None
-# Set the name of your wandb run
-wandb_name: str | None
-# Set the ID of your wandb run
-wandb_run_id: str | None
-# "offline" to save run metadata locally and not sync to the server, "disabled" to turn
-# off wandb
-wandb_mode: str | None
-# Your wandb project name
-wandb_project: str | None
-# A wandb Team name if using a Team
-wandb_entity: str | None
-wandb_watch: str | None
-# "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only
-# at the end of training
-wandb_log_model: str | None
-
-use_mlflow: bool | None
-# URI to mlflow
-mlflow_tracking_uri: str | None
-# Your experiment name
-mlflow_experiment_name: str | None
-# Your run name
-mlflow_run_name: str | None
-# set to true to copy each saved checkpoint on each save to mlflow artifact registry
-hf_mlflow_log_artifacts: bool | None
-
-# Enable or disable Comet integration.
-use_comet: bool | None
-# API key for Comet. Recommended to set via `comet login`.
-comet_api_key: str | None
-# Workspace name in Comet. Defaults to the user's default workspace.
-comet_workspace: str | None
-# Project name in Comet. Defaults to Uncategorized.
-comet_project_name: str | None
-# Identifier for the experiment. Used to append data to an existing experiment or
-# control the key of new experiments. Default to a random key.
-comet_experiment_key: str | None
-# Create a new experiment ("create") or log to an existing one ("get"). Default
-# ("get_or_create") auto-selects based on configuration.
-comet_mode: str | None
-# Set to True to log data to Comet server, or False for offline storage. Default is
-# True.
-comet_online: bool | None
-# Dictionary for additional configuration settings, see the doc for more details.
-comet_experiment_config: dict[str, Any] | None
-
-# the number of activate layers in LISA
-lisa_n_layers: int | None
-# how often to switch layers in LISA
-lisa_step_interval: int | None
-# path under the model to access the layers
-lisa_layers_attribute: str | None = model.layers
-
-gradio_title: str | None
-gradio_share: bool | None
-gradio_server_name: str | None
-gradio_server_port: int | None
-gradio_max_new_tokens: int | None
-gradio_temperature: float | None
-
-use_ray: bool = False
-ray_run_name: str | None
-ray_num_workers: int = 1
-resources_per_worker: dict
+# loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.
+loraplus_lr_ratio: float | None
+# loraplus learning rate for lora embedding layers. Default value is 1e-6.
+loraplus_lr_embedding: float | None = 1e-06
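+# Example (illustrative; placeholder values, not a recommendation):
+#   loraplus_lr_ratio: 16        # i.e. 2^4, so lr_B = 16 * lr_A
+#   loraplus_lr_embedding: 1e-6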
+
+merge_lora: bool | None
+
+# Whether to use ReLoRA. Use with jagged_restart_*steps options.
+relora: bool | None
+# threshold for optimizer magnitude when pruning
+relora_prune_ratio: float | None
+# True to perform lora weight merges on cpu during restarts, for modest gpu memory
+# savings
+relora_cpu_offload: bool | None
+
+# how often to reset for jagged restarts
+jagged_restart_steps: int | None
+# how many warmup steps to take after reset for jagged restarts
+jagged_restart_warmup_steps: int | None
+# how many anneal steps to take before reset for jagged restarts
+jagged_restart_anneal_steps: int | None
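+# Example (illustrative; the step counts are placeholders):
+#   relora: true
+#   jagged_restart_steps: 200          # reset every 200 steps
+#   jagged_restart_warmup_steps: 10    # warmup after each reset
+#   jagged_restart_anneal_steps: 10    # anneal before each reset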
+
+# If greater than 1, the optimizer update is deferred and gradients are accumulated
+# over the given number of steps before each update.
+gradient_accumulation_steps: int | None = 1
+# The number of samples to include in each micro-batch. This is the number of samples
+# sent to each GPU per step. Effective batch size per GPU = micro_batch_size *
+# gradient_accumulation_steps
+micro_batch_size: int | None = 1
+# Total batch size; we do not recommend setting this manually
+batch_size: int | None
+# per gpu micro batch size for evals, defaults to value of micro_batch_size
+eval_batch_size: int | None
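+# Example (illustrative): with the placeholder values below, each GPU processes 2
+# samples per forward pass and applies an optimizer update every 8 steps, for an
+# effective batch of 2 * 8 = 16 samples per GPU:
+#   micro_batch_size: 2
+#   gradient_accumulation_steps: 8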
+
+# whether to find batch size that fits in memory. Passed to underlying transformers
+# Trainer
+auto_find_batch_size: bool | None
+
+# Whether to include the human's prompt in the training labels or mask it out
+train_on_inputs: bool | None = False
+# Group similarly sized data to minimize padding. May be slower to start, as it must
+# download and sort the entire dataset. Note that training loss may have an oscillating
+# pattern with this enabled.
+group_by_length: bool | None
+
+learning_rate: str | float (required)
+embedding_lr: float | None
+embedding_lr_scale: float | None
+# Specify weight decay
+weight_decay: float | None = 0.0
+# Specify optimizer
+optimizer: OptimizerNames | CustomSupportedOptimizers | None = OptimizerNames.ADAMW_TORCH_FUSED
+# Dictionary of arguments to pass to the optimizer
+optim_args: str | dict[str, Any] | None
+# The target modules to optimize, i.e. the module names that you would like to train,
+# right now this is used only for GaLore algorithm
+optim_target_modules: list[str] | Literal['all_linear'] | None
+# Path to torch distx for optim 'adamw_anyprecision'
+torchdistx_path: str | None
+lr_scheduler: SchedulerType | Literal['one_cycle'] | Literal['rex'] | None = SchedulerType.COSINE
+# Specify a scheduler and kwargs to use with the optimizer
+lr_scheduler_kwargs: dict[str, Any] | None
+lr_quadratic_warmup: bool | None
+# decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of
+# peak lr
+cosine_min_lr_ratio: float | None
+# hold the lr constant from some fraction of the total training steps, e.g.
+# cosine_constant_lr_ratio=0.8 means the lr reaches cosine_min_lr at 80% of the
+# training steps and stays there
+cosine_constant_lr_ratio: float | None
+# Learning rate div factor
+lr_div_factor: float | None
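+# Example (illustrative; placeholder values, not a recommendation): an AdamW run with a
+# cosine schedule that decays to 10% of the peak lr and holds it there from 80% of the
+# training steps onward:
+#   learning_rate: 2e-5
+#   weight_decay: 0.01
+#   optimizer: adamw_torch_fused
+#   lr_scheduler: cosine
+#   cosine_min_lr_ratio: 0.1
+#   cosine_constant_lr_ratio: 0.8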
+
+lr_groups: list[LrGroup] | None
+ # For LrGroup:
+  name: str (required)
+  modules: list[str] (required)
+  lr: float (required)
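+# Example (illustrative; the group name and module names are placeholders for your
+# model's parameter names):
+# lr_groups:
+#   - name: embeddings
+#     modules: ["embed_tokens"]
+#     lr: 1e-5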
+
+# adamw hyperparams
+adam_epsilon: float | None
+# only used for CAME Optimizer
+adam_epsilon2: float | None
+# adamw hyperparams
+adam_beta1: float | None
+# adamw hyperparams
+adam_beta2: float | None
+# only used for CAME Optimizer
+adam_beta3: float | None
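+# Example (illustrative; the values shown are common AdamW defaults, not a
+# recommendation):
+#   adam_beta1: 0.9
+#   adam_beta2: 0.999
+#   adam_epsilon: 1e-8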
+
+# Dion Optimizer learning rate
+dion_lr: float | None
+# Dion Optimizer momentum
+dion_momentum: float | None
+# Dion Optimizer: r/d fraction for low-rank approximation. Used to compute the low-rank
+# dimension.
+dion_rank_fraction: float | None = 1.0
+# Dion Optimizer: Round up the low-rank dimension to a multiple of this number. This may
+# be useful to ensure even sharding.
+dion_rank_multiple_of: int | None = 1
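+# Example (illustrative): dion_rank_fraction: 0.25 keeps a low-rank dimension of
+# roughly r = 0.25 * d, rounded up to a multiple of dion_rank_multiple_of.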
+
+# Gradient clipping max norm
+max_grad_norm: float | None
+num_epochs: float = 1.0
+
+use_wandb: bool | None
+# Set the name of your wandb run
+wandb_name: str | None
+# Set the ID of your wandb run
+wandb_run_id: str | None
+# "offline" to save run metadata locally and not sync to the server, "disabled" to turn
+# off wandb
+wandb_mode: str | None
+# Your wandb project name
+wandb_project: str | None
+# A wandb Team name if using a Team
+wandb_entity: str | None
+wandb_watch: str | None
+# "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only
+# at the end of training
+wandb_log_model: str | None
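+# Example (illustrative; the project, entity, and run names are placeholders):
+#   use_wandb: true
+#   wandb_project: my-project
+#   wandb_entity: my-team
+#   wandb_name: sft-run-1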
+
+use_mlflow: bool | None
+# URI to mlflow
+mlflow_tracking_uri: str | None
+# Your experiment name
+mlflow_experiment_name: str | None
+# Your run name
+mlflow_run_name: str | None
+# set to true to copy each saved checkpoint to the mlflow artifact registry
+hf_mlflow_log_artifacts: bool | None
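+# Example (illustrative; the URI and names are placeholders):
+#   use_mlflow: true
+#   mlflow_tracking_uri: http://localhost:5000
+#   mlflow_experiment_name: my-experiment
+#   mlflow_run_name: run-1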
+
+# Enable or disable Comet integration.
+use_comet: bool | None
+# API key for Comet. Recommended to set via `comet login`.
+comet_api_key: str | None
+# Workspace name in Comet. Defaults to the user's default workspace.
+comet_workspace: str | None
+# Project name in Comet. Defaults to Uncategorized.
+comet_project_name: str | None
+# Identifier for the experiment. Used to append data to an existing experiment or
+# control the key of new experiments. Defaults to a random key.
+comet_experiment_key: str | None
+# Create a new experiment ("create") or log to an existing one ("get"). Default
+# ("get_or_create") auto-selects based on configuration.
+comet_mode: str | None
+# Set to True to log data to Comet server, or False for offline storage. Default is
+# True.
+comet_online: bool | None
+# Dictionary for additional configuration settings, see the doc for more details.
+comet_experiment_config: dict[str, Any] | None
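+# Example (illustrative; the workspace and project names are placeholders):
+#   use_comet: true
+#   comet_workspace: my-workspace
+#   comet_project_name: my-project
+#   comet_mode: get_or_create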
+
+# the number of active layers in LISA
+lisa_n_layers: int | None
+# how often to switch layers in LISA
+lisa_step_interval: int | None
+# path under the model to access the layers
+lisa_layers_attribute: str | None = model.layers
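+# Example (illustrative; the layer count and interval are placeholders):
+#   lisa_n_layers: 4
+#   lisa_step_interval: 20
+#   lisa_layers_attribute: model.layers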
+
+gradio_title: str | None
+gradio_share: bool | None
+gradio_server_name: str | None
+gradio_server_port: int | None
+gradio_max_new_tokens: int | None
+gradio_temperature: float | None
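+# Example (illustrative; host, port, and sampling values are placeholders):
+#   gradio_server_name: 0.0.0.0
+#   gradio_server_port: 7860
+#   gradio_max_new_tokens: 256
+#   gradio_temperature: 0.7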
-# The size of the image to resize to. It can be an integer (resized into padded-square
-# image) or a tuple (width, height).If not provided, we will attempt to load from
-# preprocessor.size, otherwise, images won't be resized.
-image_size: int | tuple[int, int] | None
-# The resampling algorithm to use for image resizing. Default is bilinear. Please refer
-# to PIL.Image.Resampling for more details.
-image_resize_algorithm: Literal['bilinear', 'bicubic', 'lanczos'] | Resampling | None
-
-# optional overrides to the base model configuration
-overrides_of_model_config: dict[str, Any] | None
-# optional overrides the base model loading from_pretrained
-overrides_of_model_kwargs: dict[str, Any] | None
-# If you want to specify the type of model to load, AutoModelForCausalLM is a good
-# choice too
-type_of_model: str | None
-# You can specify to choose a specific model revision from huggingface hub
-revision_of_model: str | None
-
-max_packed_sequence_len: int | None
-rope_scaling: Any | None
-noisy_embedding_alpha: float | None
-dpo_beta: float | None
-evaluation_strategy: str | None
+use_ray: bool = False
+ray_run_name: str | None
+ray_num_workers: int = 1
+resources_per_worker: dict
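+# Example (illustrative; the worker count and resource mapping are placeholders):
+#   use_ray: true
+#   ray_num_workers: 2
+#   resources_per_worker:
+#     GPU: 1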
+
+# The size of the image to resize to. It can be an integer (resized into padded-square
+# image) or a tuple (width, height). If not provided, we will attempt to load from
+# preprocessor.size; otherwise, images won't be resized.
+image_size: int | tuple[int, int] | None
+# The resampling algorithm to use for image resizing. Default is bilinear. Please refer
+# to PIL.Image.Resampling for more details.
+image_resize_algorithm: Literal['bilinear', 'bicubic', 'lanczos'] | Resampling | None
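+# Example (illustrative): resize every image into a 512x512 padded square using bicubic
+# resampling:
+#   image_size: 512
+#   image_resize_algorithm: bicubic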
+
+# optional overrides to the base model configuration
+overrides_of_model_config: dict[str, Any] | None
+# optional overrides to the base model loading via from_pretrained
+overrides_of_model_kwargs: dict[str, Any] | None
+# If you want to specify the type of model to load, AutoModelForCausalLM is a good
+# choice too
+type_of_model: str | None
+# You can specify a specific model revision to load from the huggingface hub
+revision_of_model: str | None
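+# Example (illustrative; the override key is a placeholder for any field of the model's
+# config):
+#   overrides_of_model_config:
+#     max_position_embeddings: 4096
+#   revision_of_model: main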
+
+max_packed_sequence_len: int | None
+rope_scaling: Any | None
+noisy_embedding_alpha: float | None
+dpo_beta: float | None
+evaluation_strategy: str | None
diff --git a/docs/custom_integrations.html b/docs/custom_integrations.html
index cedfcd757..d4c8ec3e6 100644
--- a/docs/custom_integrations.html
+++ b/docs/custom_integrations.html
@@ -617,7 +617,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});
diff --git a/docs/debugging.html b/docs/debugging.html
index 31d2179c2..773e78cbc 100644
--- a/docs/debugging.html
+++ b/docs/debugging.html
@@ -588,7 +588,7 @@ All of these tips are incorporated into the example con
datasets:
@@ -653,7 +653,7 @@ If you prefer to watch a video, rather than read, you can skip to the "-m", "axolotl.cli.train", "dev_chat_template.yml", // The flags below simplify debugging by overriding the axolotl config // with the debugging tips above. Modify as needed.
-"--dataset_processes=1", // limits data preprocessing to one process
+"--dataset_num_proc=1", // limits data preprocessing to one process "--max_steps=1", // limits training to just one step "--batch_size=1", // minimizes batch size "--micro_batch_size=1", // minimizes batch size
diff --git a/examples/colab-notebooks/colab-axolotl-example.html b/examples/colab-notebooks/colab-axolotl-example.html
index 5d76178b4..7cefc7c66 100644
--- a/examples/colab-notebooks/colab-axolotl-example.html
+++ b/examples/colab-notebooks/colab-axolotl-example.html
@@ -565,7 +565,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});
%%capture
# This step can take ~5-10 minutes to install dependencies
!pip install --no-build-isolation axolotl[flash-attn]>=0.9.1
-!pip install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@49f3308"
diff --git a/search.json b/search.json
index 6f38d9adb..f74149433 100644
--- a/search.json
+++ b/search.json
@@ -418,7 +418,7 @@
"href": "docs/debugging.html#general-tips",
"title": "Debugging",
"section": "General Tips",
- "text": "General Tips\nWhile debugging it’s helpful to simplify your test scenario as much as possible. Here are some tips for doing so:\n\n[!Important]\nAll of these tips are incorporated into the example configuration for debugging with VSCode below.\n\n\nMake sure you are using the latest version of axolotl: This project changes often and bugs get fixed fast. Check your git branch and make sure you have pulled the latest changes from main.\nEliminate concurrency: Restrict the number of processes to 1 for both training and data preprocessing:\n\nSet CUDA_VISIBLE_DEVICES to a single GPU, ex: export CUDA_VISIBLE_DEVICES=0.\nSet dataset_processes: 1 in your axolotl config or run the training command with --dataset_processes=1.\n\nUse a small dataset: Construct or use a small dataset from HF Hub. When using a small dataset, you will often have to make sure sample_packing: False and eval_sample_packing: False to avoid errors. If you are in a pinch and don’t have time to construct a small dataset but want to use from the HF Hub, you can shard the data (this will still tokenize the entire dataset, but will only use a fraction of the data for training. For example, to shard the dataset into 20 pieces, add the following to your axolotl config):\ndatasets:\n ...\n shards: 20\nUse a small model: A good example of a small model is TinyLlama/TinyLlama-1.1B-Chat-v1.0.\nMinimize iteration time: Make sure the training loop finishes as fast as possible, with these settings.\n\nmicro_batch_size: 1\nmax_steps: 1\nval_set_size: 0\n\nClear Caches: Axolotl caches certain steps and so does the underlying HuggingFace trainer. You may want to clear some of these caches when debugging.\n\nData preprocessing: When debugging data preprocessing, which includes prompt template formation, you may want to delete the directory set in dataset_prepared_path: in your axolotl config. If you didn’t set this value, the default is last_run_prepared.\nHF Hub: If you are debugging data preprocessing, you should clear the relevant HF cache HuggingFace cache, by deleting the appropriate ~/.cache/huggingface/datasets/... folder(s).\nThe recommended approach is to redirect all outputs and caches to a temporary folder and delete selected subfolders before each run. This is demonstrated in the example configuration below.",
+ "text": "General Tips\nWhile debugging it’s helpful to simplify your test scenario as much as possible. Here are some tips for doing so:\n\n[!Important]\nAll of these tips are incorporated into the example configuration for debugging with VSCode below.\n\n\nMake sure you are using the latest version of axolotl: This project changes often and bugs get fixed fast. Check your git branch and make sure you have pulled the latest changes from main.\nEliminate concurrency: Restrict the number of processes to 1 for both training and data preprocessing:\n\nSet CUDA_VISIBLE_DEVICES to a single GPU, ex: export CUDA_VISIBLE_DEVICES=0.\nSet dataset_num_proc: 1 in your axolotl config or run the training command with --dataset_num_proc=1.\n\nUse a small dataset: Construct or use a small dataset from HF Hub. When using a small dataset, you will often have to make sure sample_packing: False and eval_sample_packing: False to avoid errors. If you are in a pinch and don’t have time to construct a small dataset but want to use from the HF Hub, you can shard the data (this will still tokenize the entire dataset, but will only use a fraction of the data for training. For example, to shard the dataset into 20 pieces, add the following to your axolotl config):\ndatasets:\n ...\n shards: 20\nUse a small model: A good example of a small model is TinyLlama/TinyLlama-1.1B-Chat-v1.0.\nMinimize iteration time: Make sure the training loop finishes as fast as possible, with these settings.\n\nmicro_batch_size: 1\nmax_steps: 1\nval_set_size: 0\n\nClear Caches: Axolotl caches certain steps and so does the underlying HuggingFace trainer. You may want to clear some of these caches when debugging.\n\nData preprocessing: When debugging data preprocessing, which includes prompt template formation, you may want to delete the directory set in dataset_prepared_path: in your axolotl config. If you didn’t set this value, the default is last_run_prepared.\nHF Hub: If you are debugging data preprocessing, you should clear the relevant HF cache HuggingFace cache, by deleting the appropriate ~/.cache/huggingface/datasets/... folder(s).\nThe recommended approach is to redirect all outputs and caches to a temporary folder and delete selected subfolders before each run. This is demonstrated in the example configuration below.",
"crumbs": [
"Troubleshooting",
"Debugging"
@@ -429,7 +429,7 @@
"href": "docs/debugging.html#debugging-with-vscode",
"title": "Debugging",
"section": "Debugging with VSCode",
- "text": "Debugging with VSCode\n\nBackground\nThe below example shows how to configure VSCode to debug data preprocessing of the chat_template format. This is the format used when you have the following in your axolotl config:\ndatasets:\n - path: <path to your chat_template formatted dataset> # example on HF Hub: fozziethebeat/alpaca_messages_2k_test\n type: chat_template\n\n[!Important]\nIf you are already familiar with advanced VSCode debugging, you can skip the below explanation and look at the files .vscode/launch.json and .vscode/tasks.json for an example configuration.\n\n\n[!Tip]\nIf you prefer to watch a video, rather than read, you can skip to the video tutorial below (but doing both is recommended).\n\n\n\nSetup\nMake sure you have an editable install of Axolotl, which ensures that changes you make to the code are reflected at runtime. Run the following commands from the root of this project:\npip3 install packaging\npip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'\n\nRemote Hosts\nIf you developing on a remote host, you can easily use VSCode to debug remotely. To do so, you will need to follow this remote - SSH guide. You can also see the video below on Docker and Remote SSH debugging.\n\n\n\nConfiguration\nThe easiest way to get started is to modify the .vscode/launch.json file in this project. This is just an example configuration, so you may need to modify or copy it to suit your needs.\nFor example, to mimic the command cd devtools && CUDA_VISIBLE_DEVICES=0 accelerate launch -m axolotl.cli.train dev_chat_template.yml, you would use the below configuration1. Note that we add additional flags that override the axolotl config and incorporate the tips above (see the comments). We also set the working directory to devtools and set the env variable HF_HOME to a temporary folder that is later partially deleted. This is because we want to delete the HF dataset cache before each run in order to ensure that the data preprocessing code is run from scratch.\n// .vscode/launch.json\n{\n \"version\": \"0.2.0\",\n \"configurations\": [\n {\n \"name\": \"Debug axolotl prompt - chat_template\",\n \"type\": \"python\",\n \"module\": \"accelerate.commands.launch\",\n \"request\": \"launch\",\n \"args\": [\n \"-m\", \"axolotl.cli.train\", \"dev_chat_template.yml\",\n // The flags below simplify debugging by overriding the axolotl config\n // with the debugging tips above. 
Modify as needed.\n \"--dataset_processes=1\", // limits data preprocessing to one process\n \"--max_steps=1\", // limits training to just one step\n \"--batch_size=1\", // minimizes batch size\n \"--micro_batch_size=1\", // minimizes batch size\n \"--val_set_size=0\", // disables validation\n \"--sample_packing=False\", // disables sample packing which is necessary for small datasets\n \"--eval_sample_packing=False\",// disables sample packing on eval set\n \"--dataset_prepared_path=temp_debug/axolotl_outputs/data\", // send data outputs to a temp folder\n \"--output_dir=temp_debug/axolotl_outputs/model\" // send model outputs to a temp folder\n ],\n \"console\": \"integratedTerminal\", // show output in the integrated terminal\n \"cwd\": \"${workspaceFolder}/devtools\", // set working directory to devtools from the root of the project\n \"justMyCode\": true, // step through only axolotl code\n \"env\": {\"CUDA_VISIBLE_DEVICES\": \"0\", // Since we aren't doing distributed training, we need to limit to one GPU\n \"HF_HOME\": \"${workspaceFolder}/devtools/temp_debug/.hf-cache\"}, // send HF cache to a temp folder\n \"preLaunchTask\": \"cleanup-for-dataprep\", // delete temp folders (see below)\n }\n ]\n}\nAdditional notes about this configuration:\n\nThe argument justMyCode is set to true such that you step through only the axolotl code. If you want to step into dependencies, set this to false.\nThe preLaunchTask: cleanup-for-dataprep is defined in .vscode/tasks.json and is used to delete the following folders before debugging, which is essential to ensure that the data pre-processing code is run from scratch:\n\n./devtools/temp_debug/axolotl_outputs\n./devtools/temp_debug/.hf-cache/datasets\n\n\n\n[!Tip]\nYou may not want to delete these folders. For example, if you are debugging model training instead of data pre-processing, you may NOT want to delete the cache or output folders. You may also need to add additional tasks to the tasks.json file depending on your use case.\n\nBelow is the ./vscode/tasks.json file that defines the cleanup-for-dataprep task. This task is run before each debugging session when you use the above configuration. Note how there are two tasks that delete the two folders mentioned above. The third task cleanup-for-dataprep is a composite task that combines the two tasks. A composite task is necessary because VSCode does not allow you to specify multiple tasks in the preLaunchTask argument of the launch.json file.\n// .vscode/tasks.json\n// this file is used by launch.json\n{\n \"version\": \"2.0.0\",\n \"tasks\": [\n // this task changes into the devtools directory and deletes the temp_debug/axolotl_outputs folder\n {\n \"label\": \"delete-outputs\",\n \"type\": \"shell\",\n \"command\": \"rm -rf temp_debug/axolotl_outputs\",\n \"options\":{ \"cwd\": \"${workspaceFolder}/devtools\"},\n \"problemMatcher\": []\n },\n // this task changes into the devtools directory and deletes the `temp_debug/.hf-cache/datasets` folder\n {\n \"label\": \"delete-temp-hf-dataset-cache\",\n \"type\": \"shell\",\n \"command\": \"rm -rf temp_debug/.hf-cache/datasets\",\n \"options\":{ \"cwd\": \"${workspaceFolder}/devtools\"},\n \"problemMatcher\": []\n },\n // this task combines the two tasks above\n {\n \"label\": \"cleanup-for-dataprep\",\n \"dependsOn\": [\"delete-outputs\", \"delete-temp-hf-dataset-cache\"],\n }\n ]\n}\n\n\nCustomizing your debugger\nYour debugging use case may differ from the example above. 
The easiest thing to do is to put your own axolotl config in the devtools folder and modify the launch.json file to use your config. You may also want to modify the preLaunchTask to delete different folders or not delete anything at all.\n\n\nVideo Tutorial\nThe following video tutorial walks through the above configuration and demonstrates how to debug with VSCode, (click the image below to watch):\n\n\n\nHamel Husain’s tutorial: Debugging Axolotl w/VSCode",
+ "text": "Debugging with VSCode\n\nBackground\nThe below example shows how to configure VSCode to debug data preprocessing of the chat_template format. This is the format used when you have the following in your axolotl config:\ndatasets:\n - path: <path to your chat_template formatted dataset> # example on HF Hub: fozziethebeat/alpaca_messages_2k_test\n type: chat_template\n\n[!Important]\nIf you are already familiar with advanced VSCode debugging, you can skip the below explanation and look at the files .vscode/launch.json and .vscode/tasks.json for an example configuration.\n\n\n[!Tip]\nIf you prefer to watch a video, rather than read, you can skip to the video tutorial below (but doing both is recommended).\n\n\n\nSetup\nMake sure you have an editable install of Axolotl, which ensures that changes you make to the code are reflected at runtime. Run the following commands from the root of this project:\npip3 install packaging\npip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'\n\nRemote Hosts\nIf you developing on a remote host, you can easily use VSCode to debug remotely. To do so, you will need to follow this remote - SSH guide. You can also see the video below on Docker and Remote SSH debugging.\n\n\n\nConfiguration\nThe easiest way to get started is to modify the .vscode/launch.json file in this project. This is just an example configuration, so you may need to modify or copy it to suit your needs.\nFor example, to mimic the command cd devtools && CUDA_VISIBLE_DEVICES=0 accelerate launch -m axolotl.cli.train dev_chat_template.yml, you would use the below configuration1. Note that we add additional flags that override the axolotl config and incorporate the tips above (see the comments). We also set the working directory to devtools and set the env variable HF_HOME to a temporary folder that is later partially deleted. This is because we want to delete the HF dataset cache before each run in order to ensure that the data preprocessing code is run from scratch.\n// .vscode/launch.json\n{\n \"version\": \"0.2.0\",\n \"configurations\": [\n {\n \"name\": \"Debug axolotl prompt - chat_template\",\n \"type\": \"python\",\n \"module\": \"accelerate.commands.launch\",\n \"request\": \"launch\",\n \"args\": [\n \"-m\", \"axolotl.cli.train\", \"dev_chat_template.yml\",\n // The flags below simplify debugging by overriding the axolotl config\n // with the debugging tips above. 
Modify as needed.\n \"--dataset_num_proc=1\", // limits data preprocessing to one process\n \"--max_steps=1\", // limits training to just one step\n \"--batch_size=1\", // minimizes batch size\n \"--micro_batch_size=1\", // minimizes batch size\n \"--val_set_size=0\", // disables validation\n \"--sample_packing=False\", // disables sample packing which is necessary for small datasets\n \"--eval_sample_packing=False\",// disables sample packing on eval set\n \"--dataset_prepared_path=temp_debug/axolotl_outputs/data\", // send data outputs to a temp folder\n \"--output_dir=temp_debug/axolotl_outputs/model\" // send model outputs to a temp folder\n ],\n \"console\": \"integratedTerminal\", // show output in the integrated terminal\n \"cwd\": \"${workspaceFolder}/devtools\", // set working directory to devtools from the root of the project\n \"justMyCode\": true, // step through only axolotl code\n \"env\": {\"CUDA_VISIBLE_DEVICES\": \"0\", // Since we aren't doing distributed training, we need to limit to one GPU\n \"HF_HOME\": \"${workspaceFolder}/devtools/temp_debug/.hf-cache\"}, // send HF cache to a temp folder\n \"preLaunchTask\": \"cleanup-for-dataprep\", // delete temp folders (see below)\n }\n ]\n}\nAdditional notes about this configuration:\n\nThe argument justMyCode is set to true such that you step through only the axolotl code. If you want to step into dependencies, set this to false.\nThe preLaunchTask: cleanup-for-dataprep is defined in .vscode/tasks.json and is used to delete the following folders before debugging, which is essential to ensure that the data pre-processing code is run from scratch:\n\n./devtools/temp_debug/axolotl_outputs\n./devtools/temp_debug/.hf-cache/datasets\n\n\n\n[!Tip]\nYou may not want to delete these folders. For example, if you are debugging model training instead of data pre-processing, you may NOT want to delete the cache or output folders. You may also need to add additional tasks to the tasks.json file depending on your use case.\n\nBelow is the ./vscode/tasks.json file that defines the cleanup-for-dataprep task. This task is run before each debugging session when you use the above configuration. Note how there are two tasks that delete the two folders mentioned above. The third task cleanup-for-dataprep is a composite task that combines the two tasks. A composite task is necessary because VSCode does not allow you to specify multiple tasks in the preLaunchTask argument of the launch.json file.\n// .vscode/tasks.json\n// this file is used by launch.json\n{\n \"version\": \"2.0.0\",\n \"tasks\": [\n // this task changes into the devtools directory and deletes the temp_debug/axolotl_outputs folder\n {\n \"label\": \"delete-outputs\",\n \"type\": \"shell\",\n \"command\": \"rm -rf temp_debug/axolotl_outputs\",\n \"options\":{ \"cwd\": \"${workspaceFolder}/devtools\"},\n \"problemMatcher\": []\n },\n // this task changes into the devtools directory and deletes the `temp_debug/.hf-cache/datasets` folder\n {\n \"label\": \"delete-temp-hf-dataset-cache\",\n \"type\": \"shell\",\n \"command\": \"rm -rf temp_debug/.hf-cache/datasets\",\n \"options\":{ \"cwd\": \"${workspaceFolder}/devtools\"},\n \"problemMatcher\": []\n },\n // this task combines the two tasks above\n {\n \"label\": \"cleanup-for-dataprep\",\n \"dependsOn\": [\"delete-outputs\", \"delete-temp-hf-dataset-cache\"],\n }\n ]\n}\n\n\nCustomizing your debugger\nYour debugging use case may differ from the example above. 
The easiest thing to do is to put your own axolotl config in the devtools folder and modify the launch.json file to use your config. You may also want to modify the preLaunchTask to delete different folders or not delete anything at all.\n\n\nVideo Tutorial\nThe following video tutorial walks through the above configuration and demonstrates how to debug with VSCode, (click the image below to watch):\n\n\n\nHamel Husain’s tutorial: Debugging Axolotl w/VSCode",
"crumbs": [
"Troubleshooting",
"Debugging"
@@ -788,7 +788,7 @@
"href": "docs/config-reference.html",
"title": "Config Reference",
"section": "",
- "text": "# Allow overwrite yml config using from cli\nstrict: bool | None = False\n# Resume from a specific checkpoint dir\nresume_from_checkpoint: str | None\n# If resume_from_checkpoint isn't set and you simply want it to start where it left off.\n# Be careful with this being turned on between different models.\nauto_resume_from_checkpoints: bool | None\n# Resize the model embeddings when new tokens are added to multiples of 32. This is\n# reported to improve training speed on some models\nresize_token_embeddings_to_32x: bool | None\nmean_resizing_embeddings: bool | None = False\n\n# Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.\nshrink_embeddings: bool | None\n# Don't upcast the embeddings to float32 when using PEFT. Useful for low-VRAM GPUs\nembeddings_skip_upcast: bool | None\n# Reinitialize model weights randomly instead of loading pretrained weights\nreinit_weights: bool | None\n\n# module to custom trainer class to use for training\ntrainer_cls: str | None\n\n# Use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo'\nrl: RLType | None\n\ntrl: TRLConfig | None\n # For TRLConfig:\n # Beta parameter for the RL training. Same as `rl_beta`. Use\n beta: float | None\n # Maximum length of the completion for RL training.\n max_completion_length: int | None\n\n # Whether to use VLLM for RL training.\n use_vllm: bool = False\n # VLLM mode to use, one of 'server' or 'colocate'\n vllm_mode: Literal['server', 'colocate'] | None\n # Host of the vLLM server to connect to.\n vllm_server_host: str | None = 0.0.0.0\n # Port of the vLLM server to connect to.\n vllm_server_port: int | None = 8000\n # Total timeout (in seconds) to wait for the vLLM server to respond.\n vllm_server_timeout: int | None\n # Regex for vLLM guided decoding.\n vllm_guided_decoding_regex: str | None\n\n # List of reward functions to load. Paths must be importable from current dir.\n reward_funcs: list[str] | None\n # List of reward weights for the reward functions.\n reward_weights: list[float] | None\n # Number of generations to sample.\n num_generations: int | None\n # Whether to log completions.\n log_completions: bool | None = False\n # Number of completions to print when log_completions is True.\n num_completions_to_print: int | None\n # Controls whether importance sampling ratios are computed at the `'token'` or\n # `'sequence'` level. 
For GSPO, use `sequence`, default is None which corresponds to\n # the original GRPO paper.\n importance_sampling_level: Literal['sequence', 'token'] | None\n\n # Whether to sync the reference model.\n sync_ref_model: bool | None = False\n # Mixup alpha for the reference model.\n ref_model_mixup_alpha: float | None = 0.9\n # Sync steps for the reference model.\n ref_model_sync_steps: int | None = 64\n # Whether to scale rewards by their standard deviation.\n scale_rewards: bool = True\n\n # Sampling temperature for the GRPO policy.\n temperature: float | None\n # Top-p sampling probability for the generation policy.\n top_p: float | None\n # Top-k sampling for the generation policy.\n top_k: int | None\n # Minimum probability for the generation policy.\n min_p: float | None\n # Penalty for tokens that appear in prompt and generated text.\n repetition_penalty: float | None\n # Number of iterations per batch (μ) for GRPO.\n num_iterations: int | None\n # Epsilon value for clipping in the GRPO algorithm.\n epsilon: float | None\n # Upper-bound epsilon value for clipping in the GRPO algorithm.\n epsilon_high: float | None\n # Whether to use Liger loss for GRPO.\n use_liger_loss: bool | None\n # Loss formulation to use. Supported values: grpo, bnpo, dr_grpo.\n loss_type: str | None\n # Whether to exclude truncated completions from loss calculation.\n mask_truncated_completions: bool = False\n # Enable sleep mode for vLLM to offload VRAM when idle\n vllm_enable_sleep_mode: bool | None\n\nvllm: VllmConfig | None\n # For VllmConfig:\n # Device to use for VLLM\n device: str | None = auto\n # Tensor parallel size for VLLM\n tensor_parallel_size: int | None\n # Data parallel size for VLLM\n data_parallel_size: int | None\n # GPU memory utilization for VLLM\n gpu_memory_utilization: float | None = 0.9\n # Data type for VLLM\n dtype: str | None = auto\n # Maximum length of the model context for VLLM\n max_model_len: int | None\n # Enable prefix caching for VLLM\n enable_prefix_caching: bool | None\n # Host for the vLLM server to start on\n host: str | None = 0.0.0.0\n # Port of the vLLM server to start on\n port: int | None = 8000\n\n # Enable reasoning for VLLM\n enable_reasoning: bool | None\n # Reasoning parser for VLLM\n reasoning_parser: str | None\n\nqat: QATConfig | None\n # For QATConfig:\n # Fake quantization layout to use for activation quantization.\n activation_dtype: TorchAOQuantDType | None\n # Fake quantization layout to use for weight quantization.\n weight_dtype: TorchAOQuantDType = TorchAOQuantDType.int8\n # Quantize embedding\n quantize_embedding: bool | None = False\n # The number of elements in each group for per-group fake quantization\n group_size: int | None = 32\n # The number of steps to apply fake quantization after\n fake_quant_after_n_steps: int | None\n\nquantization: PTQConfig | None\n # For PTQConfig:\n # Fake quantization layout to use for weight quantization.\n weight_dtype: TorchAOQuantDType = TorchAOQuantDType.int8\n # Fake quantization layout to use for activation quantization.\n activation_dtype: TorchAOQuantDType | None\n # Whether to quantize the embedding layer.\n quantize_embedding: bool | None\n # The number of elements in each group for per-group fake quantization\n group_size: int | None = 32\n\n# Reward modelling: `True` or `False`\nreward_model: bool | None\n# Process reward modelling: `True` or `False`\nprocess_reward_model: bool | None\n# Coefficient to incentivize the reward model to output mean-zero rewards (proposed by\n# 
https://huggingface.co/papers/2312.09244, Eq. 2). Recommended value: `0.01`.\ncenter_rewards_coefficient: float | None\nnum_labels: int | None\n\n# Whether to perform weighting in DPO trainer\ndpo_use_weighting: bool | None\ndpo_use_logits_to_keep: bool | None\ndpo_label_smoothing: float | None\ndpo_norm_loss: bool | None\ndpo_padding_free: bool | None\ndpo_generate_during_eval: bool | None\n\n# A list of one or more datasets to finetune the model with\ndatasets: Annotated[list[SFTDataset | DPODataset | KTODataset | StepwiseSupervisedDataset], MinLen(1)] | None\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). 
Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n # Key containing the reasoning trace (default: \"reasoning_content\").\n field_thinking: str | None\n # The key the chat template expects that indicates the reasoning trace.\n template_thinking_key: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. 
This parameter is ignored for local datasets.\n revision: str | None\n\n # For DPODataset:\n path: str | None\n split: str | None\n type: UserDefinedDPOType | str | None\n # For UserDefinedDPOType:\n field_system: str | None\n field_prompt: str | None\n field_chosen: str | None\n field_rejected: str | None\n prompt_format: str | None\n chosen_format: str | None\n rejected_format: str | None\n data_files: list[str] | None\n revision: str | None\n field_messages: str | None\n\n # For KTODataset:\n path: str | None\n split: str | None\n type: UserDefinedKTOType | str | None\n # For UserDefinedKTOType:\n field_system: str | None\n field_prompt: str | None\n field_completion: str | None\n field_label: bool | None\n prompt_format: str | None\n completion_format: str | None\n data_files: list[str] | None\n trust_remote_code: bool | None = False\n revision: str | None\n\n # For StepwiseSupervisedDataset:\n path: str | None\n split: str | None\n data_files: list[str] | None\n revision: str | None\n step_separator: str | None\n max_completion_length: int | None\n train_on_last_step_only: bool | None\n\n# A list of one or more datasets to eval the model with. You can use either\n# test_datasets, or val_set_size, but not both.\ntest_datasets: Annotated[list[SFTDataset | DPODataset | KTODataset | StepwiseSupervisedDataset], MinLen(1)] | None\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. 
Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n # Key containing the reasoning trace (default: \"reasoning_content\").\n field_thinking: str | None\n # The key the chat template expects that indicates the reasoning trace.\n template_thinking_key: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. 
If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. This parameter is ignored for local datasets.\n revision: str | None\n\n # For DPODataset:\n path: str | None\n split: str | None\n type: UserDefinedDPOType | str | None\n # For UserDefinedDPOType:\n field_system: str | None\n field_prompt: str | None\n field_chosen: str | None\n field_rejected: str | None\n prompt_format: str | None\n chosen_format: str | None\n rejected_format: str | None\n data_files: list[str] | None\n revision: str | None\n field_messages: str | None\n\n # For KTODataset:\n path: str | None\n split: str | None\n type: UserDefinedKTOType | str | None\n # For UserDefinedKTOType:\n field_system: str | None\n field_prompt: str | None\n field_completion: str | None\n field_label: bool | None\n prompt_format: str | None\n completion_format: str | None\n data_files: list[str] | None\n trust_remote_code: bool | None = False\n revision: str | None\n\n # For StepwiseSupervisedDataset:\n path: str | None\n split: str | None\n data_files: list[str] | None\n revision: str | None\n step_separator: str | None\n max_completion_length: int | None\n train_on_last_step_only: bool | None\n\n# If false, the datasets will not be shuffled and will keep their original order in\n# `datasets`. The same applies to the `test_datasets` option and the\n# `pretraining_dataset` option. Default is true.\nshuffle_merged_datasets: bool | None = True\n# If true, each dataset in `datasets` will be shuffled before merging. This allows\n# curriculum learning strategies to be applied at the dataset level. Default is false.\nshuffle_before_merging_datasets: bool | None = False\n# Axolotl attempts to save the dataset as an arrow after packing the data together so\n# subsequent training attempts load faster, relative path\ndataset_prepared_path: str | None\n# Num shards for whole dataset\ndataset_shard_num: int | None\n# Index of shard to use for whole dataset\ndataset_shard_idx: int | None\nskip_prepare_dataset: bool | None = False\n# Number of shards to save the prepared dataset\nnum_dataset_shards_to_save: int | None\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize\npretraining_dataset: Annotated[list[PretrainingDataset | SFTDataset], MinLen(1)] | None\n # For PretrainingDataset:\n name: str | None\n path: str | None\n split: str | None = train\n text_column: str | None = text\n type: str | None = pretrain\n trust_remote_code: bool | None = False\n data_files: str | None\n skip: int | None\n\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. 
Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n # Key containing the reasoning trace (default: \"reasoning_content\").\n field_thinking: str | None\n # The key the chat template expects that indicates the reasoning trace.\n template_thinking_key: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. 
The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. This parameter is ignored for local datasets.\n revision: str | None\n\n# The maximum number of processes to use while preprocessing your input dataset. This\n# defaults to `os.cpu_count()` if not set. For Runpod VMs, it will default to number of\n# vCPUs via RUNPOD_CPU_COUNT.\ndataset_processes: int | None\n# Deduplicates datasets and test_datasets with identical entries\ndataset_exact_deduplication: bool | None\n# Keep dataset in memory while preprocessing. Only needed if cached dataset is taking\n# too much storage\ndataset_keep_in_memory: bool | None\ndataloader_pin_memory: bool | None\ndataloader_num_workers: int | None\ndataloader_prefetch_factor: int | None\ndataloader_drop_last: bool | None\n\naccelerator_config: dict[str, Any] | None\n\nremove_unused_columns: bool | None\n\n# Push prepared dataset to hub - repo_org/repo_name\npush_dataset_to_hub: str | None\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private\n# datasets. Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: bool | None\n\ndevice: Any | None\n# Passed through to transformers when loading the model when launched without\n# accelerate. 
Use `sequential` when training w/ model parallelism to limit memory\ndevice_map: Any | None\nworld_size: int | None\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank: int | None\nddp: bool | None\n\n# Seed for reproducibility\nseed: int | None\n# Advanced DDP Arguments - timeout\nddp_timeout: int | None\n# Advanced DDP Arguments - bucket cap in MB\nddp_bucket_cap_mb: int | None\n# Advanced DDP Arguments - broadcast buffers\nddp_broadcast_buffers: bool | None\nddp_find_unused_parameters: bool | None\n\n# Approximate number of predictions sent to wandb depending on batch size. Enabled above\n# 0. Default is 0\neval_table_size: int | None\n# Total number of tokens generated for predictions sent to wandb. Default is 128\neval_max_new_tokens: int | None\n# Whether to run causal language model evaluation for metrics in\n# `eval_causal_lm_metrics`\ndo_causal_lm_eval: bool | None\n# HF evaluate metrics used during evaluation. Default is ['sacrebleu', 'comet', 'ter',\n# 'chrf', 'perplexity']\neval_causal_lm_metrics: list[str] | None\ndo_bench_eval: bool | None\nbench_dataset: str | None\nbench_split: str | None\nmetric_for_best_model: str | None\ngreater_is_better: bool | None\n\n# High loss value, indicating the learning has broken down (a good estimate is ~2 times\n# the loss at the start of training)\nloss_watchdog_threshold: float | None\n# Number of high-loss steps in a row before the trainer aborts (default: 3)\nloss_watchdog_patience: int | None\n\n# Run garbage collection every `gc_steps` steps. -1 will run on epoch end and before\n# evaluations. Default is 0 (disabled).\ngc_steps: int | None\n\n# Use CUDA bf16. bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection.\n# require >=ampere\nbf16: Literal['auto'] | bool | None = auto\n# Use CUDA fp16\nfp16: bool | None\n# Enable FP8 mixed precision training using TorchAO. Best used in combination with\n# torch.compile.\nfp8: bool | None\n# Enable FSDP float8 all-gather optimization for FP8 training. Can improve training\n# speed by 10-15% when FSDP is enabled.\nfp8_enable_fsdp_float8_all_gather: bool | None\n# No AMP (automatic mixed precision) - require >=ampere\nbfloat16: bool | None\n# No AMP (automatic mixed precision)\nfloat16: bool | None\n# Use CUDA tf32 - require >=ampere\ntf32: bool | None\nfloat32: bool | None\n\n# Whether to use gradient checkpointing. Available options are: true, false, 'offload',\n# 'offload_disk'.\n# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: Literal['offload', 'offload_disk'] | bool | None = False\n# Additional kwargs to pass to the trainer for gradient checkpointing\ngradient_checkpointing_kwargs: dict[str, Any] | None\n# Whether to offload activations. Available options are: true, false, 'legacy', 'disk'.\nactivation_offloading: Literal['legacy', 'disk'] | bool | None = False\n\nunfrozen_parameters: list[str] | None\n\n# The maximum length of an input to train with, this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: int = 512\n# What to do when a tokenized row exceeds sequence_len. 'drop' removes the row;\n# 'truncate' slices tensors to sequence_len. Defaults to 'drop' for backward\n# compatibility.\nexcess_length_strategy: Literal['drop', 'truncate'] | None\n# The maximum length of an input for evaluation. 
If not specified, defaults to\n# sequence_len\neval_sequence_len: int | None\nmin_sample_len: int | None\n# maximum prompt length for RL training\nmax_prompt_len: int | None\n# Use efficient multi-packing with block diagonal attention and per sequence\n# position_ids. Recommend set to 'true'\nsample_packing: bool | None\n# The number of samples packed at a time. Increasing the following values helps with\n# packing, but usually only slightly (<%1.)\nsample_packing_group_size: int | None = 100000\n# The number of samples which can be packed into one sequence. Increase if using a large\n# sequence_len with many short samples.\nsample_packing_bin_size: int | None = 200\n# Whether to pack samples sequentially\nsample_packing_sequentially: bool | None\n# The multiprocessing start method to use for packing. Should be 'fork', 'spawn' or\n# 'forkserver'\nsample_packing_mp_start_method: str | None\n# Set to 'false' if getting errors during eval with sample_packing on\neval_sample_packing: bool | None\n# Pad inputs so each step uses constant sized buffers. This will reduce memory\n# fragmentation and may prevent OOMs, by re-using memory more efficiently. Defaults to\n# True if `sample_packing` enabled\npad_to_sequence_len: bool | None\n# Whether to use sequential sampling for curriculum learning\ncurriculum_sampling: bool | None\nmultipack_real_batches: bool | None\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening: Literal['auto'] | bool | None\n\nuse_pose: bool | None\npose_split_on_token_ids: list[int] | None\npose_max_context_len: int | None\npose_num_chunks: int | None\n\npretrain_multipack_buffer_size: int | None\n# whether to prevent cross attention for packed sequences during pretraining\npretrain_multipack_attn: bool | None = True\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation: bool | None\n\n# Use streaming mode for loading datasets\nstreaming: bool | None\n# Buffer size for multipack streaming datasets\nstreaming_multipack_buffer_size: int | None = 10000\n\n# Whether to use xformers attention patch https://github.com/facebookresearch/xformers\nxformers_attention: bool | None\n# Whether to use scaled-dot-product attention https://pytorch.org/docs/stable/generated/\n# torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention: bool | None\n# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention: bool | None\nflex_attention: bool | None\nflex_attn_compile_kwargs: dict[str, Any] | None\n# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention\nflash_attention: bool | None\n# Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_cross_entropy: bool | None\n# Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_rms_norm: bool | None\n# Whether to fuse part of the MLP into a single operation\nflash_attn_fuse_mlp: bool | None\n# Whether to use bettertransformers\nflash_optimum: bool | None\n\neager_attention: bool | None\n\n# Specify a custom attention implementation, used mostly for kernels.\nattn_implementation: str | None\n\nunsloth_cross_entropy_loss: bool | None\nunsloth_lora_mlp: bool | None\nunsloth_lora_qkv: bool | None\nunsloth_lora_o: bool | None\nunsloth_rms_norm: bool | None\nunsloth_rope: bool | None\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. 
See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_mlp_kernel: bool | None\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_qkv_kernel: bool | None\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_o_kernel: bool | None\n\n# Whether to use chunked cross entropy loss for memory efficiency\nchunked_cross_entropy: bool | None\n# Number of chunks to use for chunked cross entropy loss\nchunked_cross_entropy_num_chunks: int | None\n\n# Whether to use ALST tiled mlp for memory efficient long context\ntiled_mlp: bool | None\n\n# Number of shards to use for ALST tiled mlp. If unset, it will be set based on\n# seqlen/hidden_size\ntiled_mlp_num_shards: int | None\n\n# Whether to use original mlp for ALST tiled mlp. Otherwise uses a generic MLP based on\n# llama.\ntiled_mlp_use_original_mlp: bool | None = True\n\nllama4_linearized_experts: bool | None\n\n# Deepspeed config path. e.g., deepspeed_configs/zero3.json\ndeepspeed: str | dict[str, Any] | None\n# Whether to use deepcompile for faster training with deepspeed\ndeepcompile: bool | None\n# FSDP configuration\nfsdp: list[str] | None\n\n# FSDP configuration options\nfsdp_config: FSDPConfig | None\n # For FSDPConfig:\n # Enable activation checkpointing to reduce memory usage during forward passes\n activation_checkpointing: bool | None\n # Offload parameters to CPU to reduce GPU memory usage\n offload_params: bool | None\n # Synchronize module states across all processes\n sync_module_states: bool | None\n # Enable CPU RAM efficient loading to reduce memory usage during model loading\n cpu_ram_efficient_loading: bool | None\n # Disabling this enables swap memory usage for resource-constrained setups when\n # offload_params is enabled.\n cpu_offload_pin_memory: bool | None\n # Use original parameters instead of flattened parameters\n use_orig_params: bool | None\n\n # Type of state dict to use for saving/loading checkpoints\n state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None\n # Final state dict type to use after training completion\n final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None\n\n # Policy for automatically wrapping modules with FSDP\n auto_wrap_policy: Literal['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP'] | None\n # Class name of transformer layers to wrap (e.g., 'LlamaDecoderLayer')\n transformer_layer_cls_to_wrap: str | None\n\n # Reshard parameters after forward pass to save memory\n reshard_after_forward: bool | None\n # Mixed precision policy for FSDP (e.g., 'fp16', 'bf16')\n mixed_precision_policy: str | None\n\n# FSDP version\nfsdp_version: int | None\nfsdp_final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None\n\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for\n# no eval.\nval_set_size: float | None = 0.0\n\n# Number of devices to shard across. If not set, will use all available devices.\ndp_shard_size: int | None\n# Number of devices to replicate across.\ndp_replicate_size: int | None\n# Deprecated: use `context_parallel_size` instead\nsequence_parallel_degree: int | None\n# Set to a divisor of the number of GPUs available to split sequences into chunks of\n# equal size. 
Use in long context training to prevent OOM when sequences cannot fit into\n# a single GPU's VRAM. E.g., if 4 GPUs are available, set this value to 2 to split each\n# sequence into two equal-sized subsequences, or set to 4 to split into four equal-sized\n# subsequences. See https://docs.axolotl.ai/docs/sequence_parallelism.html for more\n# details.\ncontext_parallel_size: int | None\n# Optional; strides across the key dimension. Larger values use more memory but should\n# make training faster. Must evenly divide the number of KV heads in your model.\nheads_k_stride: int | None\n# One of 'varlen_llama3', 'batch_ring', 'batch_zigzag', 'batch_stripe'. Defaults to\n# 'varlen_llama3' in the sample packing case, and 'batch_ring' in the non-sample packing\n# case.\nring_attn_func: RingAttnFunc | None\n# Number of tensor parallel processes in TP group. Only supported with DeepSpeed AutoTP.\ntensor_parallel_size: int | None\n\n# Add or change special tokens. If you add tokens here, you don't need to add them to\n# the `tokens` list.\nspecial_tokens: SpecialTokensConfig | None\n # For SpecialTokensConfig:\n bos_token: str | None\n eos_token: str | None\n pad_token: str | None\n unk_token: str | None\n additional_special_tokens: list[str] | None\n\n# Add extra tokens to the tokenizer\ntokens: list[str] | None\n# Mapping token_id to new_token_string to override reserved added_tokens in the\n# tokenizer. Only works for tokens that are not part of the base vocab (aka are\n# added_tokens). Can be checked if they exist in tokenizer.json added_tokens.\nadded_tokens_overrides: dict[int, str] | None\n\n# Whether to use torch.compile and which backend to use. setting to `auto` will enable\n# torch compile when torch>=2.6.0\ntorch_compile: Literal['auto'] | bool | None\n# Backend to use for torch.compile\ntorch_compile_backend: str | None\ntorch_compile_mode: Literal['default', 'reduce-overhead', 'max-autotune'] | None\n\n# Maximum number of iterations to train for. It precedes num_epochs which means that if\n# both are set, num_epochs will not be guaranteed. e.g., when 1 epoch is 1000 steps =>\n# `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps: int | None\n# Number of warmup steps. Cannot use with warmup_ratio\nwarmup_steps: int | None\n# Warmup ratio. Cannot use with warmup_steps\nwarmup_ratio: float | None\n# Leave empty to eval at each epoch, integer for every N steps. float for fraction of\n# total steps\neval_steps: int | float | None\n# Number of times per epoch to run evals, mutually exclusive with eval_steps\nevals_per_epoch: int | None\n# Set to `no` to skip evaluation, `epoch` at end of each epoch, leave empty to infer\n# from `eval_steps`\neval_strategy: str | None\n\n# Leave empty to save at each epoch, integer for every N steps. float for fraction of\n# total steps\nsave_steps: int | float | None\n# Number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsaves_per_epoch: int | None\n# Set to `no` to skip checkpoint saves, `epoch` at end of each epoch, `best` when better\n# result is achieved, leave empty to infer from `save_steps`\nsave_strategy: str | None\n# Checkpoints saved at a time\nsave_total_limit: int | None\n# Whether to checkpoint a model after the first step of training. Defaults to False.\nsave_first_step: bool | None\n\n# Logging frequency\nlogging_steps: int | None\n# Stop training after this many evaluation losses have increased in a row. 
https://huggi\n# ngface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppin\n# gCallback\nearly_stopping_patience: int | None\nload_best_model_at_end: bool | None = False\n# Save only the model weights, skipping the optimizer. Using this means you can't resume\n# from checkpoints.\nsave_only_model: bool | None = False\n# Use tensorboard for logging\nuse_tensorboard: bool | None\n# Enable the pytorch profiler to capture the first N steps of training to the\n# output_dir. see https://pytorch.org/blog/understanding-gpu-memory-1/ for more\n# information. Snapshots can be visualized @ https://pytorch.org/memory_viz\nprofiler_steps: int | None\n# Which step to start the profiler at. Useful for only capturing a few steps mid-run.\nprofiler_steps_start: int | None = 0\n# bool of whether to report tokens per second at the end of training. This is not\n# supported with pre-training datasets.\ninclude_tokens_per_second: bool | None\n# bool of whether to report tokens per second per-gpu during training by measuring\n# throughput of non-padding tokens.\ninclude_tkps: bool | None = True\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to\n# add noise to embeddings. Currently only supported on Llama and Mistral\nneftune_noise_alpha: float | None\n\n# Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to\n# `beta` in `ORPOConfig` due to trl mapping.\norpo_alpha: float | None\n# Weighting of NLL term in loss from RPO paper\nrpo_alpha: float | None\n# Target reward margin for the SimPO loss\nsimpo_gamma: float | None\n# Weight of the BC regularizer\ncpo_alpha: float | None\n\n# Factor for desirable loss term in KTO loss\nkto_desirable_weight: float | None\n# Factor for undesirable loss term in KTO loss\nkto_undesirable_weight: float | None\n# The beta parameter for the RL training\nrl_beta: float | None\n\n# Defines the max memory usage per gpu on the system. Passed through to transformers\n# when loading the model.\nmax_memory: dict[int | Literal['cpu', 'disk'], int | str] | None\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in\n# gigabytes); default: unset\ngpu_memory_limit: int | str | None\n# Whether to use low_cpu_mem_usage\nlow_cpu_mem_usage: bool | None\n\n# The name of the chat template to use for training, following values are supported:\n# tokenizer_default: Uses the chat template that is available in the\n# tokenizer_config.json. If the chat template is not available in the tokenizer, it will\n# raise an error. This is the default value.\n# alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n# are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n# tokenizer_default_fallback_*: where * is the name of the chat template to fallback to.\n# E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not\n# available in the tokenizer. jinja: Uses a custom jinja template for the chat template.\n# The custom jinja template should be provided in the chat_template_jinja field. The\n# selected chat template will be saved to the tokenizer_config.json for easier\n# inferencing\nchat_template: ChatTemplate | Annotated[str, StringConstraints(pattern='^tokenizer_default_fallback_')] | None\n# Custom jinja template or path to jinja file for chat template. This will be only used\n# if chat_template is set to `jinja` or `null` (in which case chat_template is\n# automatically set to `jinja`). 
Default is null.\nchat_template_jinja: str | None\n# Additional kwargs to pass to the chat template. This is useful for customizing the\n# chat template. For example, you can pass `thinking=False` to add a generation prompt\n# to the chat template.\nchat_template_kwargs: dict[str, Any] | None\n# Custom EOT (End-of-Turn) tokens to mask/unmask during training. These tokens mark the\n# boundaries between conversation turns. For example: ['/INST', '</s>',\n# '[/SYSTEM_PROMPT]']. If not specified, defaults to just the model's eos_token. This is\n# useful for templates that use multiple delimiter tokens.\neot_tokens: list[str] | None\n# Changes the default system message. Currently only supports chatml.\ndefault_system_message: str | None\n\n# Token index or indices to adjust embedding weights to the mean of the other tokens.\n# This is useful when the model has untrained embeddings.\nfix_untrained_tokens: int | list[int] | None\n\nis_preprocess: bool | None\npreprocess_iterable: bool | None\n\n# Total number of tokens - internal use\ntotal_num_tokens: int | None\ntotal_supervised_tokens: int | None\n# You can set these packing optimizations AFTER starting a training at least once. The\n# trainer will provide recommended values for these values.\nsample_packing_eff_est: float | None\naxolotl_config_path: str | None\n\n# Internal use only - Used to identify which the model is based on\nis_falcon_derived_model: bool | None\n# Internal use only - Used to identify which the model is based on\nis_llama_derived_model: bool | None\n# Internal use only - Used to identify which the model is based on. Please note that if\n# you set this to true, `padding_side` will be set to 'left' by default\nis_mistral_derived_model: bool | None\n# Internal use only - Used to identify which the model is based on\nis_qwen_derived_model: bool | None\n\n# Add plugins to extend the pipeline. See `src/axolotl/integrations` for the available\n# plugins or doc below for more details.\n# https://docs.axolotl.ai/docs/custom_integrations.html\nplugins: list[str] | None\n\n# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files. This\n# can also be a relative path to a model on disk\nbase_model: str (required)\n# If the base_model repo on hf hub doesn't include configuration .json files, You can\n# set that here, or leave this empty to default to base_model\nbase_model_config: str | None\ncls_model_config: str | None\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one defined in the base model\ntokenizer_config: str | None\n# use_fast option for tokenizer loading from_pretrained, default to True\ntokenizer_use_fast: bool | None\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy: bool | None\n# Whether to use mistral-common tokenizer. If set to True, it will use the mistral-\n# common tokenizer.\ntokenizer_use_mistral_common: bool | None\n# Corresponding tokenizer for the model AutoTokenizer is a good choice\ntokenizer_type: str | None\n# transformers processor class\nprocessor_type: str | None\n# Whether to save jinja files for tokenizer, transformers default is True\ntokenizer_save_jinja_files: bool | None = True\n# Trust remote code for untrusted source\ntrust_remote_code: bool | None\n\n# Don't move the model to the device before sharding. Set to `false` to revert to legacy\n# behavior.\nexperimental_skip_move_to_device: bool | None = True\n\n# Use custom kernels, e.g. 
MegaBlocks.\nuse_kernels: bool | None\n\n# Model loading quantization config\nmodel_quantization_config: Literal['Mxfp4Config'] | None\n# kwargs for model quantization config\nmodel_quantization_config_kwargs: dict[str, Any] | None\n\n# Where to save the full-finetuned model to\noutput_dir: str = ./model-out\n# push checkpoints to hub\nhub_model_id: str | None\n# how to push checkpoints to hub\nhub_strategy: str | None\n# Save model as safetensors (require safetensors package). Default True\nsave_safetensors: bool | None = True\n\n# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer\nload_in_8bit: bool | None = False\n# Use bitsandbytes 4 bit\nload_in_4bit: bool | None = False\n\n# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in\n# original model\nadapter: str | None\n# If you already have a lora model trained that you want to load, put that here. This\n# means after training, if you want to test the model, you should set this to the value\n# of `output_dir`. Note that if you merge an adapter to the base model, a new\n# subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir: str | None\nlora_r: int | None\nlora_alpha: int | None\nlora_fan_in_fan_out: bool | None\nlora_target_modules: str | list[str] | None\nlora_target_parameters: str | list[str] | None\n# If true, will target all linear modules\nlora_target_linear: bool | None\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules\n# because they need to know the new tokens. For LLaMA and Mistral, you need to save\n# `embed_tokens` and `lm_head`. It may vary for other models. `embed_tokens` converts\n# tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\nlora_modules_to_save: list[str] | None\nlora_dropout: float | None = 0.0\n# The layer indices to transform, otherwise, apply to all layers\npeft_layers_to_transform: list[int] | None\npeft_layers_pattern: list[str] | None\n\npeft: PeftConfig | None\n # For PeftConfig:\n # Configuration options for loftq initialization for LoRA\n loftq_config: LoftQConfig | None\n # For LoftQConfig:\n # typically 4 bits\n loftq_bits: int = 4\n\n# Whether to use DoRA.\npeft_use_dora: bool | None\n# Whether to use RSLoRA.\npeft_use_rslora: bool | None\n# List of layer indices to replicate.\npeft_layer_replication: list[tuple[int, int]] | None\n# How to initialize LoRA weights. Default to True which is MS original implementation.\npeft_init_lora_weights: bool | str | None\n# A list of token indices to fine-tune on the `embed_tokens` layer. Otherwise, a dict\n# mapping an embedding layer name to its trainable token indices. See\n# https://huggingface.co/docs/peft/v0.17.0/en/developer_guides/lora#efficiently-train-\n# tokens-alongside-lora\npeft_trainable_token_indices: list[int] | dict[str, list[int]] | None\n\n# load qlora model in sharded format for FSDP using answer.ai technique.\nqlora_sharded_model_loading: bool | None = False\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it\n# takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge\nlora_on_cpu: bool | None\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: bool | None\n# optional overrides to the bnb 4bit quantization configuration\nbnb_config_kwargs: dict[str, Any] | None\n\n# loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.\nloraplus_lr_ratio: float | None\n# loraplus learning rate for lora embedding layers. 
Default value is 1e-6.\nloraplus_lr_embedding: float | None = 1e-06\n\nmerge_lora: bool | None\n\n# Whether to use ReLoRA. Use with jagged_restart_*steps options.\nrelora: bool | None\n# threshold for optimizer magnitude when pruning\nrelora_prune_ratio: float | None\n# True to perform lora weight merges on cpu during restarts, for modest gpu memory\n# savings\nrelora_cpu_offload: bool | None\n\n# how often to reset for jagged restarts\njagged_restart_steps: int | None\n# how many warmup steps to take after reset for jagged restarts\njagged_restart_warmup_steps: int | None\n# how many anneal steps to take before reset for jagged restarts\njagged_restart_anneal_steps: int | None\n\n# If greater than 1, backpropagation will be skipped and the gradients will be\n# accumulated for the given number of steps.\ngradient_accumulation_steps: int | None = 1\n# The number of samples to include in each batch. This is the number of samples sent to\n# each GPU. Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: int | None = 1\n# Total batch size, we do not recommended setting this manually\nbatch_size: int | None\n# per gpu micro batch size for evals, defaults to value of micro_batch_size\neval_batch_size: int | None\n\n# whether to find batch size that fits in memory. Passed to underlying transformers\n# Trainer\nauto_find_batch_size: bool | None\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: bool | None = False\n# Group similarly sized data to minimize padding. May be slower to start, as it must\n# download and sort the entire dataset. Note that training loss may have an oscillating\n# pattern with this enabled.\ngroup_by_length: bool | None\n\nlearning_rate: str | float (required)\nembedding_lr: float | None\nembedding_lr_scale: float | None\n# Specify weight decay\nweight_decay: float | None = 0.0\n# Specify optimizer\noptimizer: OptimizerNames | CustomSupportedOptimizers | None = OptimizerNames.ADAMW_TORCH_FUSED\n# Dictionary of arguments to pass to the optimizer\noptim_args: str | dict[str, Any] | None\n# The target modules to optimize, i.e. the module names that you would like to train,\n# right now this is used only for GaLore algorithm\noptim_target_modules: list[str] | Literal['all_linear'] | None\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path: str | None\nlr_scheduler: SchedulerType | Literal['one_cycle'] | Literal['rex'] | None = SchedulerType.COSINE\n# Specify a scheduler and kwargs to use with the optimizer\nlr_scheduler_kwargs: dict[str, Any] | None\nlr_quadratic_warmup: bool | None\n# decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of\n# peak lr\ncosine_min_lr_ratio: float | None\n# freeze lr at some percentage of the step, e.g. 
cosine_constant_lr_ratio=0.8 means\n# start cosine_min_lr at 80% of training step\ncosine_constant_lr_ratio: float | None\n# Learning rate div factor\nlr_div_factor: float | None\n\nlr_groups: list[LrGroup] | None\n # For LrGroup:\n name: str (required)\n modules: list[str] (required)\n lr: float (required)\n\n# adamw hyperparams\nadam_epsilon: float | None\n# only used for CAME Optimizer\nadam_epsilon2: float | None\n# adamw hyperparams\nadam_beta1: float | None\n# adamw hyperparams\nadam_beta2: float | None\n# only used for CAME Optimizer\nadam_beta3: float | None\n\n# Dion Optimizer learning rate\ndion_lr: float | None\n# Dion Optimizer momentum\ndion_momentum: float | None\n# Dion Optimizer: r/d fraction for low-rank approximation. Used to compute the low-rank\n# dimension.\ndion_rank_fraction: float | None = 1.0\n# Dion Optimizer: Round up the low-rank dimension to a multiple of this number. This may\n# be useful to ensure even sharding.\ndion_rank_multiple_of: int | None = 1\n\n# Gradient clipping max norm\nmax_grad_norm: float | None\nnum_epochs: float = 1.0\n\nuse_wandb: bool | None\n# Set the name of your wandb run\nwandb_name: str | None\n# Set the ID of your wandb run\nwandb_run_id: str | None\n# \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn\n# off wandb\nwandb_mode: str | None\n# Your wandb project name\nwandb_project: str | None\n# A wandb Team name if using a Team\nwandb_entity: str | None\nwandb_watch: str | None\n# \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only\n# at the end of training\nwandb_log_model: str | None\n\nuse_mlflow: bool | None\n# URI to mlflow\nmlflow_tracking_uri: str | None\n# Your experiment name\nmlflow_experiment_name: str | None\n# Your run name\nmlflow_run_name: str | None\n# set to true to copy each saved checkpoint on each save to mlflow artifact registry\nhf_mlflow_log_artifacts: bool | None\n\n# Enable or disable Comet integration.\nuse_comet: bool | None\n# API key for Comet. Recommended to set via `comet login`.\ncomet_api_key: str | None\n# Workspace name in Comet. Defaults to the user's default workspace.\ncomet_workspace: str | None\n# Project name in Comet. Defaults to Uncategorized.\ncomet_project_name: str | None\n# Identifier for the experiment. Used to append data to an existing experiment or\n# control the key of new experiments. Default to a random key.\ncomet_experiment_key: str | None\n# Create a new experiment (\"create\") or log to an existing one (\"get\"). Default\n# (\"get_or_create\") auto-selects based on configuration.\ncomet_mode: str | None\n# Set to True to log data to Comet server, or False for offline storage. Default is\n# True.\ncomet_online: bool | None\n# Dictionary for additional configuration settings, see the doc for more details.\ncomet_experiment_config: dict[str, Any] | None\n\n# the number of activate layers in LISA\nlisa_n_layers: int | None\n# how often to switch layers in LISA\nlisa_step_interval: int | None\n# path under the model to access the layers\nlisa_layers_attribute: str | None = model.layers\n\ngradio_title: str | None\ngradio_share: bool | None\ngradio_server_name: str | None\ngradio_server_port: int | None\ngradio_max_new_tokens: int | None\ngradio_temperature: float | None\n\nuse_ray: bool = False\nray_run_name: str | None\nray_num_workers: int = 1\nresources_per_worker: dict\n\n# The size of the image to resize to. 
It can be an integer (resized into padded-square\n# image) or a tuple (width, height).If not provided, we will attempt to load from\n# preprocessor.size, otherwise, images won't be resized.\nimage_size: int | tuple[int, int] | None\n# The resampling algorithm to use for image resizing. Default is bilinear. Please refer\n# to PIL.Image.Resampling for more details.\nimage_resize_algorithm: Literal['bilinear', 'bicubic', 'lanczos'] | Resampling | None\n\n# optional overrides to the base model configuration\noverrides_of_model_config: dict[str, Any] | None\n# optional overrides the base model loading from_pretrained\noverrides_of_model_kwargs: dict[str, Any] | None\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good\n# choice too\ntype_of_model: str | None\n# You can specify to choose a specific model revision from huggingface hub\nrevision_of_model: str | None\n\nmax_packed_sequence_len: int | None\nrope_scaling: Any | None\nnoisy_embedding_alpha: float | None\ndpo_beta: float | None\nevaluation_strategy: str | None",
+ "text": "# Allow overwrite yml config using from cli\nstrict: bool | None = False\n# Resume from a specific checkpoint dir\nresume_from_checkpoint: str | None\n# If resume_from_checkpoint isn't set and you simply want it to start where it left off.\n# Be careful with this being turned on between different models.\nauto_resume_from_checkpoints: bool | None\n# Resize the model embeddings when new tokens are added to multiples of 32. This is\n# reported to improve training speed on some models\nresize_token_embeddings_to_32x: bool | None\nmean_resizing_embeddings: bool | None = False\n\n# Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.\nshrink_embeddings: bool | None\n# Don't upcast the embeddings to float32 when using PEFT. Useful for low-VRAM GPUs\nembeddings_skip_upcast: bool | None\n# Reinitialize model weights randomly instead of loading pretrained weights\nreinit_weights: bool | None\n\n# module to custom trainer class to use for training\ntrainer_cls: str | None\n\n# Use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo'\nrl: RLType | None\n\ntrl: TRLConfig | None\n # For TRLConfig:\n # Beta parameter for the RL training. Same as `rl_beta`. Use\n beta: float | None\n # Maximum length of the completion for RL training.\n max_completion_length: int | None\n\n # Whether to use VLLM for RL training.\n use_vllm: bool = False\n # VLLM mode to use, one of 'server' or 'colocate'\n vllm_mode: Literal['server', 'colocate'] | None\n # Host of the vLLM server to connect to.\n vllm_server_host: str | None = 0.0.0.0\n # Port of the vLLM server to connect to.\n vllm_server_port: int | None = 8000\n # Total timeout (in seconds) to wait for the vLLM server to respond.\n vllm_server_timeout: int | None\n # Regex for vLLM guided decoding.\n vllm_guided_decoding_regex: str | None\n\n # List of reward functions to load. Paths must be importable from current dir.\n reward_funcs: list[str] | None\n # List of reward weights for the reward functions.\n reward_weights: list[float] | None\n # Number of generations to sample.\n num_generations: int | None\n # Whether to log completions.\n log_completions: bool | None = False\n # Number of completions to print when log_completions is True.\n num_completions_to_print: int | None\n # Controls whether importance sampling ratios are computed at the `'token'` or\n # `'sequence'` level. 
For GSPO, use `sequence`, default is None which corresponds to\n # the original GRPO paper.\n importance_sampling_level: Literal['sequence', 'token'] | None\n\n # Whether to sync the reference model.\n sync_ref_model: bool | None = False\n # Mixup alpha for the reference model.\n ref_model_mixup_alpha: float | None = 0.9\n # Sync steps for the reference model.\n ref_model_sync_steps: int | None = 64\n # Whether to scale rewards by their standard deviation.\n scale_rewards: bool = True\n\n # Sampling temperature for the GRPO policy.\n temperature: float | None\n # Top-p sampling probability for the generation policy.\n top_p: float | None\n # Top-k sampling for the generation policy.\n top_k: int | None\n # Minimum probability for the generation policy.\n min_p: float | None\n # Penalty for tokens that appear in prompt and generated text.\n repetition_penalty: float | None\n # Number of iterations per batch (μ) for GRPO.\n num_iterations: int | None\n # Epsilon value for clipping in the GRPO algorithm.\n epsilon: float | None\n # Upper-bound epsilon value for clipping in the GRPO algorithm.\n epsilon_high: float | None\n # Whether to use Liger loss for GRPO.\n use_liger_loss: bool | None\n # Loss formulation to use. Supported values: grpo, bnpo, dr_grpo.\n loss_type: str | None\n # Whether to exclude truncated completions from loss calculation.\n mask_truncated_completions: bool = False\n # Enable sleep mode for vLLM to offload VRAM when idle\n vllm_enable_sleep_mode: bool | None\n\nvllm: VllmConfig | None\n # For VllmConfig:\n # Device to use for VLLM\n device: str | None = auto\n # Tensor parallel size for VLLM\n tensor_parallel_size: int | None\n # Data parallel size for VLLM\n data_parallel_size: int | None\n # GPU memory utilization for VLLM\n gpu_memory_utilization: float | None = 0.9\n # Data type for VLLM\n dtype: str | None = auto\n # Maximum length of the model context for VLLM\n max_model_len: int | None\n # Enable prefix caching for VLLM\n enable_prefix_caching: bool | None\n # Host for the vLLM server to start on\n host: str | None = 0.0.0.0\n # Port of the vLLM server to start on\n port: int | None = 8000\n\n # Enable reasoning for VLLM\n enable_reasoning: bool | None\n # Reasoning parser for VLLM\n reasoning_parser: str | None\n\nqat: QATConfig | None\n # For QATConfig:\n # Fake quantization layout to use for activation quantization.\n activation_dtype: TorchAOQuantDType | None\n # Fake quantization layout to use for weight quantization.\n weight_dtype: TorchAOQuantDType = TorchAOQuantDType.int8\n # Quantize embedding\n quantize_embedding: bool | None = False\n # The number of elements in each group for per-group fake quantization\n group_size: int | None = 32\n # The number of steps to apply fake quantization after\n fake_quant_after_n_steps: int | None\n\nquantization: PTQConfig | None\n # For PTQConfig:\n # Fake quantization layout to use for weight quantization.\n weight_dtype: TorchAOQuantDType = TorchAOQuantDType.int8\n # Fake quantization layout to use for activation quantization.\n activation_dtype: TorchAOQuantDType | None\n # Whether to quantize the embedding layer.\n quantize_embedding: bool | None\n # The number of elements in each group for per-group fake quantization\n group_size: int | None = 32\n\n# Reward modelling: `True` or `False`\nreward_model: bool | None\n# Process reward modelling: `True` or `False`\nprocess_reward_model: bool | None\n# Coefficient to incentivize the reward model to output mean-zero rewards (proposed by\n# 
https://huggingface.co/papers/2312.09244, Eq. 2). Recommended value: `0.01`.\ncenter_rewards_coefficient: float | None\nnum_labels: int | None\n\n# Whether to perform weighting in DPO trainer\ndpo_use_weighting: bool | None\ndpo_use_logits_to_keep: bool | None\ndpo_label_smoothing: float | None\ndpo_norm_loss: bool | None\ndpo_padding_free: bool | None\ndpo_generate_during_eval: bool | None\n\n# A list of one or more datasets to finetune the model with\ndatasets: Annotated[list[SFTDataset | DPODataset | KTODataset | StepwiseSupervisedDataset], MinLen(1)] | None\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). 
Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n # Key containing the reasoning trace (default: \"reasoning_content\").\n field_thinking: str | None\n # The key the chat template expects that indicates the reasoning trace.\n template_thinking_key: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. 
This parameter is ignored for local datasets.\n revision: str | None\n\n # For DPODataset:\n path: str | None\n split: str | None\n type: UserDefinedDPOType | str | None\n # For UserDefinedDPOType:\n field_system: str | None\n field_prompt: str | None\n field_chosen: str | None\n field_rejected: str | None\n prompt_format: str | None\n chosen_format: str | None\n rejected_format: str | None\n data_files: list[str] | None\n revision: str | None\n field_messages: str | None\n\n # For KTODataset:\n path: str | None\n split: str | None\n type: UserDefinedKTOType | str | None\n # For UserDefinedKTOType:\n field_system: str | None\n field_prompt: str | None\n field_completion: str | None\n field_label: bool | None\n prompt_format: str | None\n completion_format: str | None\n data_files: list[str] | None\n trust_remote_code: bool | None = False\n revision: str | None\n\n # For StepwiseSupervisedDataset:\n path: str | None\n split: str | None\n data_files: list[str] | None\n revision: str | None\n step_separator: str | None\n max_completion_length: int | None\n train_on_last_step_only: bool | None\n\n# A list of one or more datasets to eval the model with. You can use either\n# test_datasets, or val_set_size, but not both.\ntest_datasets: Annotated[list[SFTDataset | DPODataset | KTODataset | StepwiseSupervisedDataset], MinLen(1)] | None\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. 
Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n # Key containing the reasoning trace (default: \"reasoning_content\").\n field_thinking: str | None\n # The key the chat template expects that indicates the reasoning trace.\n template_thinking_key: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. 
If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. This parameter is ignored for local datasets.\n revision: str | None\n\n # For DPODataset:\n path: str | None\n split: str | None\n type: UserDefinedDPOType | str | None\n # For UserDefinedDPOType:\n field_system: str | None\n field_prompt: str | None\n field_chosen: str | None\n field_rejected: str | None\n prompt_format: str | None\n chosen_format: str | None\n rejected_format: str | None\n data_files: list[str] | None\n revision: str | None\n field_messages: str | None\n\n # For KTODataset:\n path: str | None\n split: str | None\n type: UserDefinedKTOType | str | None\n # For UserDefinedKTOType:\n field_system: str | None\n field_prompt: str | None\n field_completion: str | None\n field_label: bool | None\n prompt_format: str | None\n completion_format: str | None\n data_files: list[str] | None\n trust_remote_code: bool | None = False\n revision: str | None\n\n # For StepwiseSupervisedDataset:\n path: str | None\n split: str | None\n data_files: list[str] | None\n revision: str | None\n step_separator: str | None\n max_completion_length: int | None\n train_on_last_step_only: bool | None\n\n# If false, the datasets will not be shuffled and will keep their original order in\n# `datasets`. The same applies to the `test_datasets` option and the\n# `pretraining_dataset` option. Default is true.\nshuffle_merged_datasets: bool | None = True\n# If true, each dataset in `datasets` will be shuffled before merging. This allows\n# curriculum learning strategies to be applied at the dataset level. Default is false.\nshuffle_before_merging_datasets: bool | None = False\n# Axolotl attempts to save the dataset as an arrow after packing the data together so\n# subsequent training attempts load faster, relative path\ndataset_prepared_path: str | None\n# Num shards for whole dataset\ndataset_shard_num: int | None\n# Index of shard to use for whole dataset\ndataset_shard_idx: int | None\nskip_prepare_dataset: bool | None = False\n# Number of shards to save the prepared dataset\nnum_dataset_shards_to_save: int | None\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize\npretraining_dataset: Annotated[list[PretrainingDataset | SFTDataset], MinLen(1)] | None\n # For PretrainingDataset:\n name: str | None\n path: str | None\n split: str | None = train\n text_column: str | None = text\n type: str | None = pretrain\n trust_remote_code: bool | None = False\n data_files: str | None\n skip: int | None\n\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. 
Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n # Key containing the reasoning trace (default: \"reasoning_content\").\n field_thinking: str | None\n # The key the chat template expects that indicates the reasoning trace.\n template_thinking_key: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. 
The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. This parameter is ignored for local datasets.\n revision: str | None\n\n# The maximum number of processes to use while preprocessing your input dataset. This\n# defaults to `os.cpu_count()` if not set. For Runpod VMs, it will default to number of\n# vCPUs via RUNPOD_CPU_COUNT.\ndataset_processes: int | None\n# The maximum number of processes to use while preprocessing your input dataset. This\n# defaults to `os.cpu_count()` if not set. For Runpod VMs, it will default to number of\n# vCPUs via RUNPOD_CPU_COUNT.\ndataset_num_proc: int | None\n\n# Deduplicates datasets and test_datasets with identical entries\ndataset_exact_deduplication: bool | None\n# Keep dataset in memory while preprocessing. Only needed if cached dataset is taking\n# too much storage\ndataset_keep_in_memory: bool | None\ndataloader_pin_memory: bool | None\ndataloader_num_workers: int | None\ndataloader_prefetch_factor: int | None\ndataloader_drop_last: bool | None\n\naccelerator_config: dict[str, Any] | None\n\nremove_unused_columns: bool | None\n\n# Push prepared dataset to hub - repo_org/repo_name\npush_dataset_to_hub: str | None\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private\n# datasets. Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: bool | None\n\ndevice: Any | None\n# Passed through to transformers when loading the model when launched without\n# accelerate. 
Use `sequential` when training w/ model parallelism to limit memory\ndevice_map: Any | None\nworld_size: int | None\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank: int | None\nddp: bool | None\n\n# Seed for reproducibility\nseed: int | None\n# Advanced DDP Arguments - timeout\nddp_timeout: int | None\n# Advanced DDP Arguments - bucket cap in MB\nddp_bucket_cap_mb: int | None\n# Advanced DDP Arguments - broadcast buffers\nddp_broadcast_buffers: bool | None\nddp_find_unused_parameters: bool | None\n\n# Approximate number of predictions sent to wandb depending on batch size. Enabled above\n# 0. Default is 0\neval_table_size: int | None\n# Total number of tokens generated for predictions sent to wandb. Default is 128\neval_max_new_tokens: int | None\n# Whether to run causal language model evaluation for metrics in\n# `eval_causal_lm_metrics`\ndo_causal_lm_eval: bool | None\n# HF evaluate metrics used during evaluation. Default is ['sacrebleu', 'comet', 'ter',\n# 'chrf', 'perplexity']\neval_causal_lm_metrics: list[str] | None\ndo_bench_eval: bool | None\nbench_dataset: str | None\nbench_split: str | None\nmetric_for_best_model: str | None\ngreater_is_better: bool | None\n\n# High loss value, indicating the learning has broken down (a good estimate is ~2 times\n# the loss at the start of training)\nloss_watchdog_threshold: float | None\n# Number of high-loss steps in a row before the trainer aborts (default: 3)\nloss_watchdog_patience: int | None\n\n# Run garbage collection every `gc_steps` steps. -1 will run on epoch end and before\n# evaluations. Default is 0 (disabled).\ngc_steps: int | None\n\n# Use CUDA bf16. bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection.\n# require >=ampere\nbf16: Literal['auto'] | bool | None = auto\n# Use CUDA fp16\nfp16: bool | None\n# Enable FP8 mixed precision training using TorchAO. Best used in combination with\n# torch.compile.\nfp8: bool | None\n# Enable FSDP float8 all-gather optimization for FP8 training. Can improve training\n# speed by 10-15% when FSDP is enabled.\nfp8_enable_fsdp_float8_all_gather: bool | None\n# No AMP (automatic mixed precision) - require >=ampere\nbfloat16: bool | None\n# No AMP (automatic mixed precision)\nfloat16: bool | None\n# Use CUDA tf32 - require >=ampere\ntf32: bool | None\nfloat32: bool | None\n\n# Whether to use gradient checkpointing. Available options are: true, false, 'offload',\n# 'offload_disk'.\n# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: Literal['offload', 'offload_disk'] | bool | None = False\n# Additional kwargs to pass to the trainer for gradient checkpointing\ngradient_checkpointing_kwargs: dict[str, Any] | None\n# Whether to offload activations. Available options are: true, false, 'legacy', 'disk'.\nactivation_offloading: Literal['legacy', 'disk'] | bool | None = False\n\nunfrozen_parameters: list[str] | None\n\n# The maximum length of an input to train with, this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: int = 512\n# What to do when a tokenized row exceeds sequence_len. 'drop' removes the row;\n# 'truncate' slices tensors to sequence_len. Defaults to 'drop' for backward\n# compatibility.\nexcess_length_strategy: Literal['drop', 'truncate'] | None\n# The maximum length of an input for evaluation. 
If not specified, defaults to\n# sequence_len\neval_sequence_len: int | None\nmin_sample_len: int | None\n# maximum prompt length for RL training\nmax_prompt_len: int | None\n# Use efficient multi-packing with block diagonal attention and per sequence\n# position_ids. Recommend set to 'true'\nsample_packing: bool | None\n# The number of samples packed at a time. Increasing the following values helps with\n# packing, but usually only slightly (<%1.)\nsample_packing_group_size: int | None = 100000\n# The number of samples which can be packed into one sequence. Increase if using a large\n# sequence_len with many short samples.\nsample_packing_bin_size: int | None = 200\n# Whether to pack samples sequentially\nsample_packing_sequentially: bool | None\n# The multiprocessing start method to use for packing. Should be 'fork', 'spawn' or\n# 'forkserver'\nsample_packing_mp_start_method: str | None\n# Set to 'false' if getting errors during eval with sample_packing on\neval_sample_packing: bool | None\n# Pad inputs so each step uses constant sized buffers. This will reduce memory\n# fragmentation and may prevent OOMs, by re-using memory more efficiently. Defaults to\n# True if `sample_packing` enabled\npad_to_sequence_len: bool | None\n# Whether to use sequential sampling for curriculum learning\ncurriculum_sampling: bool | None\nmultipack_real_batches: bool | None\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening: Literal['auto'] | bool | None\n\nuse_pose: bool | None\npose_split_on_token_ids: list[int] | None\npose_max_context_len: int | None\npose_num_chunks: int | None\n\npretrain_multipack_buffer_size: int | None\n# whether to prevent cross attention for packed sequences during pretraining\npretrain_multipack_attn: bool | None = True\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation: bool | None\n\n# Use streaming mode for loading datasets\nstreaming: bool | None\n# Buffer size for multipack streaming datasets\nstreaming_multipack_buffer_size: int | None = 10000\n\n# Whether to use xformers attention patch https://github.com/facebookresearch/xformers\nxformers_attention: bool | None\n# Whether to use scaled-dot-product attention https://pytorch.org/docs/stable/generated/\n# torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention: bool | None\n# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention: bool | None\nflex_attention: bool | None\nflex_attn_compile_kwargs: dict[str, Any] | None\n# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention\nflash_attention: bool | None\n# Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_cross_entropy: bool | None\n# Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_rms_norm: bool | None\n# Whether to fuse part of the MLP into a single operation\nflash_attn_fuse_mlp: bool | None\n# Whether to use bettertransformers\nflash_optimum: bool | None\n\neager_attention: bool | None\n\n# Specify a custom attention implementation, used mostly for kernels.\nattn_implementation: str | None\n\nunsloth_cross_entropy_loss: bool | None\nunsloth_lora_mlp: bool | None\nunsloth_lora_qkv: bool | None\nunsloth_lora_o: bool | None\nunsloth_rms_norm: bool | None\nunsloth_rope: bool | None\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. 
See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_mlp_kernel: bool | None\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_qkv_kernel: bool | None\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_o_kernel: bool | None\n\n# Whether to use chunked cross entropy loss for memory efficiency\nchunked_cross_entropy: bool | None\n# Number of chunks to use for chunked cross entropy loss\nchunked_cross_entropy_num_chunks: int | None\n\n# Whether to use ALST tiled mlp for memory efficient long context\ntiled_mlp: bool | None\n\n# Number of shards to use for ALST tiled mlp. If unset, it will be set based on\n# seqlen/hidden_size\ntiled_mlp_num_shards: int | None\n\n# Whether to use original mlp for ALST tiled mlp. Otherwise uses a generic MLP based on\n# llama.\ntiled_mlp_use_original_mlp: bool | None = True\n\nllama4_linearized_experts: bool | None\n\n# Deepspeed config path. e.g., deepspeed_configs/zero3.json\ndeepspeed: str | dict[str, Any] | None\n# Whether to use deepcompile for faster training with deepspeed\ndeepcompile: bool | None\n# FSDP configuration\nfsdp: list[str] | None\n\n# FSDP configuration options\nfsdp_config: FSDPConfig | None\n # For FSDPConfig:\n # Enable activation checkpointing to reduce memory usage during forward passes\n activation_checkpointing: bool | None\n # Offload parameters to CPU to reduce GPU memory usage\n offload_params: bool | None\n # Synchronize module states across all processes\n sync_module_states: bool | None\n # Enable CPU RAM efficient loading to reduce memory usage during model loading\n cpu_ram_efficient_loading: bool | None\n # Disabling this enables swap memory usage for resource-constrained setups when\n # offload_params is enabled.\n cpu_offload_pin_memory: bool | None\n # Use original parameters instead of flattened parameters\n use_orig_params: bool | None\n\n # Type of state dict to use for saving/loading checkpoints\n state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None\n # Final state dict type to use after training completion\n final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None\n\n # Policy for automatically wrapping modules with FSDP\n auto_wrap_policy: Literal['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP'] | None\n # Class name of transformer layers to wrap (e.g., 'LlamaDecoderLayer')\n transformer_layer_cls_to_wrap: str | None\n\n # Reshard parameters after forward pass to save memory\n reshard_after_forward: bool | None\n # Mixed precision policy for FSDP (e.g., 'fp16', 'bf16')\n mixed_precision_policy: str | None\n\n# FSDP version\nfsdp_version: int | None\nfsdp_final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None\n\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for\n# no eval.\nval_set_size: float | None = 0.0\n\n# Number of devices to shard across. If not set, will use all available devices.\ndp_shard_size: int | None\n# Number of devices to replicate across.\ndp_replicate_size: int | None\n# Deprecated: use `context_parallel_size` instead\nsequence_parallel_degree: int | None\n# Set to a divisor of the number of GPUs available to split sequences into chunks of\n# equal size. 
Use in long context training to prevent OOM when sequences cannot fit into\n# a single GPU's VRAM. E.g., if 4 GPUs are available, set this value to 2 to split each\n# sequence into two equal-sized subsequences, or set to 4 to split into four equal-sized\n# subsequences. See https://docs.axolotl.ai/docs/sequence_parallelism.html for more\n# details.\ncontext_parallel_size: int | None\n# Optional; strides across the key dimension. Larger values use more memory but should\n# make training faster. Must evenly divide the number of KV heads in your model.\nheads_k_stride: int | None\n# One of 'varlen_llama3', 'batch_ring', 'batch_zigzag', 'batch_stripe'. Defaults to\n# 'varlen_llama3' in the sample packing case, and 'batch_ring' in the non-sample packing\n# case.\nring_attn_func: RingAttnFunc | None\n# Number of tensor parallel processes in TP group. Only supported with DeepSpeed AutoTP.\ntensor_parallel_size: int | None\n\n# Add or change special tokens. If you add tokens here, you don't need to add them to\n# the `tokens` list.\nspecial_tokens: SpecialTokensConfig | None\n # For SpecialTokensConfig:\n bos_token: str | None\n eos_token: str | None\n pad_token: str | None\n unk_token: str | None\n additional_special_tokens: list[str] | None\n\n# Add extra tokens to the tokenizer\ntokens: list[str] | None\n# Mapping token_id to new_token_string to override reserved added_tokens in the\n# tokenizer. Only works for tokens that are not part of the base vocab (aka are\n# added_tokens). Can be checked if they exist in tokenizer.json added_tokens.\nadded_tokens_overrides: dict[int, str] | None\n\n# Whether to use torch.compile and which backend to use. setting to `auto` will enable\n# torch compile when torch>=2.6.0\ntorch_compile: Literal['auto'] | bool | None\n# Backend to use for torch.compile\ntorch_compile_backend: str | None\ntorch_compile_mode: Literal['default', 'reduce-overhead', 'max-autotune'] | None\n\n# Maximum number of iterations to train for. It precedes num_epochs which means that if\n# both are set, num_epochs will not be guaranteed. e.g., when 1 epoch is 1000 steps =>\n# `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps: int | None\n# Number of warmup steps. Cannot use with warmup_ratio\nwarmup_steps: int | None\n# Warmup ratio. Cannot use with warmup_steps\nwarmup_ratio: float | None\n# Leave empty to eval at each epoch, integer for every N steps. float for fraction of\n# total steps\neval_steps: int | float | None\n# Number of times per epoch to run evals, mutually exclusive with eval_steps\nevals_per_epoch: int | None\n# Set to `no` to skip evaluation, `epoch` at end of each epoch, leave empty to infer\n# from `eval_steps`\neval_strategy: str | None\n\n# Leave empty to save at each epoch, integer for every N steps. float for fraction of\n# total steps\nsave_steps: int | float | None\n# Number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsaves_per_epoch: int | None\n# Set to `no` to skip checkpoint saves, `epoch` at end of each epoch, `best` when better\n# result is achieved, leave empty to infer from `save_steps`\nsave_strategy: str | None\n# Checkpoints saved at a time\nsave_total_limit: int | None\n# Whether to checkpoint a model after the first step of training. Defaults to False.\nsave_first_step: bool | None\n\n# Logging frequency\nlogging_steps: int | None\n# Stop training after this many evaluation losses have increased in a row. 
https://huggi\n# ngface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppin\n# gCallback\nearly_stopping_patience: int | None\nload_best_model_at_end: bool | None = False\n# Save only the model weights, skipping the optimizer. Using this means you can't resume\n# from checkpoints.\nsave_only_model: bool | None = False\n# Use tensorboard for logging\nuse_tensorboard: bool | None\n# Enable the pytorch profiler to capture the first N steps of training to the\n# output_dir. see https://pytorch.org/blog/understanding-gpu-memory-1/ for more\n# information. Snapshots can be visualized @ https://pytorch.org/memory_viz\nprofiler_steps: int | None\n# Which step to start the profiler at. Useful for only capturing a few steps mid-run.\nprofiler_steps_start: int | None = 0\n# bool of whether to report tokens per second at the end of training. This is not\n# supported with pre-training datasets.\ninclude_tokens_per_second: bool | None\n# bool of whether to report tokens per second per-gpu during training by measuring\n# throughput of non-padding tokens.\ninclude_tkps: bool | None = True\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to\n# add noise to embeddings. Currently only supported on Llama and Mistral\nneftune_noise_alpha: float | None\n\n# Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to\n# `beta` in `ORPOConfig` due to trl mapping.\norpo_alpha: float | None\n# Weighting of NLL term in loss from RPO paper\nrpo_alpha: float | None\n# Target reward margin for the SimPO loss\nsimpo_gamma: float | None\n# Weight of the BC regularizer\ncpo_alpha: float | None\n\n# Factor for desirable loss term in KTO loss\nkto_desirable_weight: float | None\n# Factor for undesirable loss term in KTO loss\nkto_undesirable_weight: float | None\n# The beta parameter for the RL training\nrl_beta: float | None\n\n# Defines the max memory usage per gpu on the system. Passed through to transformers\n# when loading the model.\nmax_memory: dict[int | Literal['cpu', 'disk'], int | str] | None\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in\n# gigabytes); default: unset\ngpu_memory_limit: int | str | None\n# Whether to use low_cpu_mem_usage\nlow_cpu_mem_usage: bool | None\n\n# The name of the chat template to use for training, following values are supported:\n# tokenizer_default: Uses the chat template that is available in the\n# tokenizer_config.json. If the chat template is not available in the tokenizer, it will\n# raise an error. This is the default value.\n# alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n# are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n# tokenizer_default_fallback_*: where * is the name of the chat template to fallback to.\n# E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not\n# available in the tokenizer. jinja: Uses a custom jinja template for the chat template.\n# The custom jinja template should be provided in the chat_template_jinja field. The\n# selected chat template will be saved to the tokenizer_config.json for easier\n# inferencing\nchat_template: ChatTemplate | Annotated[str, StringConstraints(pattern='^tokenizer_default_fallback_')] | None\n# Custom jinja template or path to jinja file for chat template. This will be only used\n# if chat_template is set to `jinja` or `null` (in which case chat_template is\n# automatically set to `jinja`). 
Default is null.\nchat_template_jinja: str | None\n# Additional kwargs to pass to the chat template. This is useful for customizing the\n# chat template. For example, you can pass `thinking=False` to add a generation prompt\n# to the chat template.\nchat_template_kwargs: dict[str, Any] | None\n# Custom EOT (End-of-Turn) tokens to mask/unmask during training. These tokens mark the\n# boundaries between conversation turns. For example: ['/INST', '</s>',\n# '[/SYSTEM_PROMPT]']. If not specified, defaults to just the model's eos_token. This is\n# useful for templates that use multiple delimiter tokens.\neot_tokens: list[str] | None\n# Changes the default system message. Currently only supports chatml.\ndefault_system_message: str | None\n\n# Token index or indices to adjust embedding weights to the mean of the other tokens.\n# This is useful when the model has untrained embeddings.\nfix_untrained_tokens: int | list[int] | None\n\nis_preprocess: bool | None\npreprocess_iterable: bool | None\n\n# Total number of tokens - internal use\ntotal_num_tokens: int | None\ntotal_supervised_tokens: int | None\n# You can set these packing optimizations AFTER starting a training at least once. The\n# trainer will provide recommended values for these values.\nsample_packing_eff_est: float | None\naxolotl_config_path: str | None\n\n# Internal use only - Used to identify which the model is based on\nis_falcon_derived_model: bool | None\n# Internal use only - Used to identify which the model is based on\nis_llama_derived_model: bool | None\n# Internal use only - Used to identify which the model is based on. Please note that if\n# you set this to true, `padding_side` will be set to 'left' by default\nis_mistral_derived_model: bool | None\n# Internal use only - Used to identify which the model is based on\nis_qwen_derived_model: bool | None\n\n# Add plugins to extend the pipeline. See `src/axolotl/integrations` for the available\n# plugins or doc below for more details.\n# https://docs.axolotl.ai/docs/custom_integrations.html\nplugins: list[str] | None\n\n# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files. This\n# can also be a relative path to a model on disk\nbase_model: str (required)\n# If the base_model repo on hf hub doesn't include configuration .json files, You can\n# set that here, or leave this empty to default to base_model\nbase_model_config: str | None\ncls_model_config: str | None\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one defined in the base model\ntokenizer_config: str | None\n# use_fast option for tokenizer loading from_pretrained, default to True\ntokenizer_use_fast: bool | None\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy: bool | None\n# Whether to use mistral-common tokenizer. If set to True, it will use the mistral-\n# common tokenizer.\ntokenizer_use_mistral_common: bool | None\n# Corresponding tokenizer for the model AutoTokenizer is a good choice\ntokenizer_type: str | None\n# transformers processor class\nprocessor_type: str | None\n# Whether to save jinja files for tokenizer, transformers default is True\ntokenizer_save_jinja_files: bool | None = True\n# Trust remote code for untrusted source\ntrust_remote_code: bool | None\n\n# Don't move the model to the device before sharding. Set to `false` to revert to legacy\n# behavior.\nexperimental_skip_move_to_device: bool | None = True\n\n# Use custom kernels, e.g. 
MegaBlocks.\nuse_kernels: bool | None\n\n# Model loading quantization config\nmodel_quantization_config: Literal['Mxfp4Config'] | None\n# kwargs for model quantization config\nmodel_quantization_config_kwargs: dict[str, Any] | None\n\n# Where to save the full-finetuned model to\noutput_dir: str = ./model-out\n# push checkpoints to hub\nhub_model_id: str | None\n# how to push checkpoints to hub\nhub_strategy: str | None\n# Save model as safetensors (require safetensors package). Default True\nsave_safetensors: bool | None = True\n\n# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer\nload_in_8bit: bool | None = False\n# Use bitsandbytes 4 bit\nload_in_4bit: bool | None = False\n\n# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in\n# original model\nadapter: str | None\n# If you already have a lora model trained that you want to load, put that here. This\n# means after training, if you want to test the model, you should set this to the value\n# of `output_dir`. Note that if you merge an adapter to the base model, a new\n# subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir: str | None\nlora_r: int | None\nlora_alpha: int | None\nlora_fan_in_fan_out: bool | None\nlora_target_modules: str | list[str] | None\nlora_target_parameters: str | list[str] | None\n# If true, will target all linear modules\nlora_target_linear: bool | None\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules\n# because they need to know the new tokens. For LLaMA and Mistral, you need to save\n# `embed_tokens` and `lm_head`. It may vary for other models. `embed_tokens` converts\n# tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\nlora_modules_to_save: list[str] | None\nlora_dropout: float | None = 0.0\n# The layer indices to transform, otherwise, apply to all layers\npeft_layers_to_transform: list[int] | None\npeft_layers_pattern: list[str] | None\n\npeft: PeftConfig | None\n # For PeftConfig:\n # Configuration options for loftq initialization for LoRA\n loftq_config: LoftQConfig | None\n # For LoftQConfig:\n # typically 4 bits\n loftq_bits: int = 4\n\n# Whether to use DoRA.\npeft_use_dora: bool | None\n# Whether to use RSLoRA.\npeft_use_rslora: bool | None\n# List of layer indices to replicate.\npeft_layer_replication: list[tuple[int, int]] | None\n# How to initialize LoRA weights. Default to True which is MS original implementation.\npeft_init_lora_weights: bool | str | None\n# A list of token indices to fine-tune on the `embed_tokens` layer. Otherwise, a dict\n# mapping an embedding layer name to its trainable token indices. See\n# https://huggingface.co/docs/peft/v0.17.0/en/developer_guides/lora#efficiently-train-\n# tokens-alongside-lora\npeft_trainable_token_indices: list[int] | dict[str, list[int]] | None\n\n# load qlora model in sharded format for FSDP using answer.ai technique.\nqlora_sharded_model_loading: bool | None = False\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it\n# takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge\nlora_on_cpu: bool | None\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: bool | None\n# optional overrides to the bnb 4bit quantization configuration\nbnb_config_kwargs: dict[str, Any] | None\n\n# loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.\nloraplus_lr_ratio: float | None\n# loraplus learning rate for lora embedding layers. 
Default value is 1e-6.\nloraplus_lr_embedding: float | None = 1e-06\n\nmerge_lora: bool | None\n\n# Whether to use ReLoRA. Use with jagged_restart_*steps options.\nrelora: bool | None\n# threshold for optimizer magnitude when pruning\nrelora_prune_ratio: float | None\n# True to perform lora weight merges on cpu during restarts, for modest gpu memory\n# savings\nrelora_cpu_offload: bool | None\n\n# how often to reset for jagged restarts\njagged_restart_steps: int | None\n# how many warmup steps to take after reset for jagged restarts\njagged_restart_warmup_steps: int | None\n# how many anneal steps to take before reset for jagged restarts\njagged_restart_anneal_steps: int | None\n\n# If greater than 1, backpropagation will be skipped and the gradients will be\n# accumulated for the given number of steps.\ngradient_accumulation_steps: int | None = 1\n# The number of samples to include in each batch. This is the number of samples sent to\n# each GPU. Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: int | None = 1\n# Total batch size, we do not recommended setting this manually\nbatch_size: int | None\n# per gpu micro batch size for evals, defaults to value of micro_batch_size\neval_batch_size: int | None\n\n# whether to find batch size that fits in memory. Passed to underlying transformers\n# Trainer\nauto_find_batch_size: bool | None\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: bool | None = False\n# Group similarly sized data to minimize padding. May be slower to start, as it must\n# download and sort the entire dataset. Note that training loss may have an oscillating\n# pattern with this enabled.\ngroup_by_length: bool | None\n\nlearning_rate: str | float (required)\nembedding_lr: float | None\nembedding_lr_scale: float | None\n# Specify weight decay\nweight_decay: float | None = 0.0\n# Specify optimizer\noptimizer: OptimizerNames | CustomSupportedOptimizers | None = OptimizerNames.ADAMW_TORCH_FUSED\n# Dictionary of arguments to pass to the optimizer\noptim_args: str | dict[str, Any] | None\n# The target modules to optimize, i.e. the module names that you would like to train,\n# right now this is used only for GaLore algorithm\noptim_target_modules: list[str] | Literal['all_linear'] | None\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path: str | None\nlr_scheduler: SchedulerType | Literal['one_cycle'] | Literal['rex'] | None = SchedulerType.COSINE\n# Specify a scheduler and kwargs to use with the optimizer\nlr_scheduler_kwargs: dict[str, Any] | None\nlr_quadratic_warmup: bool | None\n# decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of\n# peak lr\ncosine_min_lr_ratio: float | None\n# freeze lr at some percentage of the step, e.g. 
cosine_constant_lr_ratio=0.8 means\n# start cosine_min_lr at 80% of training step\ncosine_constant_lr_ratio: float | None\n# Learning rate div factor\nlr_div_factor: float | None\n\nlr_groups: list[LrGroup] | None\n # For LrGroup:\n name: str (required)\n modules: list[str] (required)\n lr: float (required)\n\n# adamw hyperparams\nadam_epsilon: float | None\n# only used for CAME Optimizer\nadam_epsilon2: float | None\n# adamw hyperparams\nadam_beta1: float | None\n# adamw hyperparams\nadam_beta2: float | None\n# only used for CAME Optimizer\nadam_beta3: float | None\n\n# Dion Optimizer learning rate\ndion_lr: float | None\n# Dion Optimizer momentum\ndion_momentum: float | None\n# Dion Optimizer: r/d fraction for low-rank approximation. Used to compute the low-rank\n# dimension.\ndion_rank_fraction: float | None = 1.0\n# Dion Optimizer: Round up the low-rank dimension to a multiple of this number. This may\n# be useful to ensure even sharding.\ndion_rank_multiple_of: int | None = 1\n\n# Gradient clipping max norm\nmax_grad_norm: float | None\nnum_epochs: float = 1.0\n\nuse_wandb: bool | None\n# Set the name of your wandb run\nwandb_name: str | None\n# Set the ID of your wandb run\nwandb_run_id: str | None\n# \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn\n# off wandb\nwandb_mode: str | None\n# Your wandb project name\nwandb_project: str | None\n# A wandb Team name if using a Team\nwandb_entity: str | None\nwandb_watch: str | None\n# \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only\n# at the end of training\nwandb_log_model: str | None\n\nuse_mlflow: bool | None\n# URI to mlflow\nmlflow_tracking_uri: str | None\n# Your experiment name\nmlflow_experiment_name: str | None\n# Your run name\nmlflow_run_name: str | None\n# set to true to copy each saved checkpoint on each save to mlflow artifact registry\nhf_mlflow_log_artifacts: bool | None\n\n# Enable or disable Comet integration.\nuse_comet: bool | None\n# API key for Comet. Recommended to set via `comet login`.\ncomet_api_key: str | None\n# Workspace name in Comet. Defaults to the user's default workspace.\ncomet_workspace: str | None\n# Project name in Comet. Defaults to Uncategorized.\ncomet_project_name: str | None\n# Identifier for the experiment. Used to append data to an existing experiment or\n# control the key of new experiments. Default to a random key.\ncomet_experiment_key: str | None\n# Create a new experiment (\"create\") or log to an existing one (\"get\"). Default\n# (\"get_or_create\") auto-selects based on configuration.\ncomet_mode: str | None\n# Set to True to log data to Comet server, or False for offline storage. Default is\n# True.\ncomet_online: bool | None\n# Dictionary for additional configuration settings, see the doc for more details.\ncomet_experiment_config: dict[str, Any] | None\n\n# the number of activate layers in LISA\nlisa_n_layers: int | None\n# how often to switch layers in LISA\nlisa_step_interval: int | None\n# path under the model to access the layers\nlisa_layers_attribute: str | None = model.layers\n\ngradio_title: str | None\ngradio_share: bool | None\ngradio_server_name: str | None\ngradio_server_port: int | None\ngradio_max_new_tokens: int | None\ngradio_temperature: float | None\n\nuse_ray: bool = False\nray_run_name: str | None\nray_num_workers: int = 1\nresources_per_worker: dict\n\n# The size of the image to resize to. 
It can be an integer (resized into padded-square\n# image) or a tuple (width, height). If not provided, we will attempt to load from\n# preprocessor.size; otherwise, images won't be resized.\nimage_size: int | tuple[int, int] | None\n# The resampling algorithm to use for image resizing. Default is bilinear. Please refer\n# to PIL.Image.Resampling for more details.\nimage_resize_algorithm: Literal['bilinear', 'bicubic', 'lanczos'] | Resampling | None\n\n# optional overrides to the base model configuration\noverrides_of_model_config: dict[str, Any] | None\n# optional overrides to the base model loading via from_pretrained\noverrides_of_model_kwargs: dict[str, Any] | None\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good\n# choice too\ntype_of_model: str | None\n# You can specify to choose a specific model revision from huggingface hub\nrevision_of_model: str | None\n\nmax_packed_sequence_len: int | None\nrope_scaling: Any | None\nnoisy_embedding_alpha: float | None\ndpo_beta: float | None\nevaluation_strategy: str | None",
"crumbs": [
"Getting Started",
"Config Reference"
@@ -2283,7 +2283,7 @@
"href": "docs/custom_integrations.html#cut-cross-entropy",
"title": "Custom Integrations",
"section": "Cut Cross Entropy",
- "text": "Cut Cross Entropy\nCut Cross Entropy (CCE) reduces VRAM usage through optimization on the cross-entropy operation during loss calculation.\nSee https://github.com/apple/ml-cross-entropy\n\nRequirements\n\nPyTorch 2.4.0 or higher\n\n\n\nInstallation\nRun the following command to install cut_cross_entropy[transformers] if you don’t have it already.\n\nIf you are in dev environment\n\npython scripts/cutcrossentropy_install.py | sh\n\nIf you are installing from pip\n\npip3 uninstall -y cut-cross-entropy && pip3 install \"cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@49f3308\"\n\n\nUsage\nplugins:\n - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin\n\n\nSupported Models\n\napertus\narcee\ncohere\ncohere2\ndeepseek_v3\ngemma\ngemma2\ngemma3\ngemma3_text\ngemma3n\ngemma3n_text\nglm\nglm4\nglm4_moe\nglm4v\nglm4v_moe\ngpt_oss\ngranite\ngranitemoe\ngranitemoeshared\ngranitemoehybrid\nhunyuan_v1_dense\nhunyuan_v1_moe\nlfm2\nlfm2_moe\nlfm2_vl\nllama\nllama4\nllama4_text\nllava\nmistral\nmistral3\nmixtral\nmllama\nphi\nphi3\nphi4_multimodal\nqwen2\nqwen2_vl\nqwen2_moe\nqwen2_5_vl\nqwen3\nqwen3_moe\nqwen3_vl\nqwen3_vl_moe\nqwen3_next\nsmollm3\nseed_oss\nvoxtral\n\n\n\nCitation\n@article{wijmans2024cut,\n author = {Erik Wijmans and\n Brody Huval and\n Alexander Hertzberg and\n Vladlen Koltun and\n Philipp Kr\\\"ahenb\\\"uhl},\n title = {Cut Your Losses in Large-Vocabulary Language Models},\n journal = {arXiv},\n year = {2024},\n url = {https://arxiv.org/abs/2411.09009},\n}\nPlease see reference here",
+ "text": "Cut Cross Entropy\nCut Cross Entropy (CCE) reduces VRAM usage through optimization on the cross-entropy operation during loss calculation.\nSee https://github.com/apple/ml-cross-entropy\n\nRequirements\n\nPyTorch 2.4.0 or higher\n\n\n\nInstallation\nRun the following command to install cut_cross_entropy[transformers] if you don’t have it already.\n\nIf you are in dev environment\n\npython scripts/cutcrossentropy_install.py | sh\n\nIf you are installing from pip\n\npip3 uninstall -y cut-cross-entropy && pip3 install \"cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@8a1a0ec\"\n\n\nUsage\nplugins:\n - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin\n\n\nSupported Models\n\napertus\narcee\ncohere\ncohere2\ndeepseek_v3\ngemma\ngemma2\ngemma3\ngemma3_text\ngemma3n\ngemma3n_text\nglm\nglm4\nglm4_moe\nglm4v\nglm4v_moe\ngpt_oss\ngranite\ngranitemoe\ngranitemoeshared\ngranitemoehybrid\nhunyuan_v1_dense\nhunyuan_v1_moe\nlfm2\nlfm2_moe\nlfm2_vl\nllama\nllama4\nllama4_text\nllava\nmistral\nmistral3\nmixtral\nmllama\nphi\nphi3\nphi4_multimodal\nqwen2\nqwen2_vl\nqwen2_moe\nqwen2_5_vl\nqwen3\nqwen3_moe\nqwen3_vl\nqwen3_vl_moe\nqwen3_next\nsmollm3\nseed_oss\nvoxtral\n\n\n\nCitation\n@article{wijmans2024cut,\n author = {Erik Wijmans and\n Brody Huval and\n Alexander Hertzberg and\n Vladlen Koltun and\n Philipp Kr\\\"ahenb\\\"uhl},\n title = {Cut Your Losses in Large-Vocabulary Language Models},\n journal = {arXiv},\n year = {2024},\n url = {https://arxiv.org/abs/2411.09009},\n}\nPlease see reference here",
"crumbs": [
"Advanced Features",
"Custom Integrations"
diff --git a/sitemap.xml b/sitemap.xml
index 6a8c507a3..31b783a5c 100644
--- a/sitemap.xml
+++ b/sitemap.xml
@@ -2,798 +2,798 @@
[sitemap.xml hunk: the <lastmod> timestamps for the listed documentation pages are updated from 2025-10-10T13:44-13:48Z to 2025-10-14T19:54-19:58Z; the page URLs themselves are unchanged.]
- 2025-10-10T13:44:33.346Z
+ 2025-10-14T19:54:15.008Zhttps://docs.axolotl.ai/src/axolotl/integrations/LICENSE.html
- 2025-10-10T13:44:33.367Z
+ 2025-10-14T19:54:15.029Z