Build a data collator for preference-tuning trainers.

Returns None for RL types that provide their own collator (e.g. GRPO,
KTO), letting the trainer construct its default. For DPO/IPO/ORPO/SimPO,
returns an AxolotlDPODataCollatorWithPadding when `pad_to_multiple_of`
is set, otherwise None (so the trainer falls back to the TRL default).
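For concreteness, here is a minimal sketch of that selection logic, assuming a `cfg.rl` string field and the `pad_to_multiple_of` option added by the diff below. The helper name `build_preference_collator`, the `cfg`/tokenizer shapes, and the `PaddingCollator` stand-in (for AxolotlDPODataCollatorWithPadding) are illustrative assumptions, not the actual axolotl API.

```python
# Hedged sketch only -- the helper name, cfg fields, and PaddingCollator are
# illustrative stand-ins, not the real axolotl/TRL classes.
from dataclasses import dataclass
from typing import Any, Optional


@dataclass
class PaddingCollator:
    """Stand-in for AxolotlDPODataCollatorWithPadding."""

    pad_token_id: int
    pad_to_multiple_of: int


def build_preference_collator(cfg: Any, tokenizer: Any) -> Optional[PaddingCollator]:
    """Return a collator for preference tuning, or None for the trainer default."""
    # RL types whose TRL trainers construct their own collators: return None
    # so the trainer builds its default.
    if cfg.rl in ("grpo", "kto"):
        return None
    # DPO-family trainers: only override the default when pad_to_multiple_of
    # is set; otherwise None falls back to the TRL default collator.
    if cfg.rl in ("dpo", "ipo", "orpo", "simpo") and cfg.pad_to_multiple_of:
        return PaddingCollator(
            pad_token_id=tokenizer.pad_token_id,
            pad_to_multiple_of=cfg.pad_to_multiple_of,
        )
    return None


if __name__ == "__main__":
    from types import SimpleNamespace

    cfg = SimpleNamespace(rl="dpo", pad_to_multiple_of=8)
    tok = SimpleNamespace(pad_token_id=0)
    print(build_preference_collator(cfg, tok))
    # PaddingCollator(pad_token_id=0, pad_to_multiple_of=8)
```

In an axolotl config this corresponds to setting, e.g., `pad_to_multiple_of: 8` alongside an `rl: dpo` run; leaving it unset keeps the TRL default collator.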
diff --git a/docs/config-reference.html b/docs/config-reference.html
index 98e7995f3..c86d0e22e 100644
--- a/docs/config-reference.html
+++ b/docs/config-reference.html
@@ -1745,705 +1745,706 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});
# fragmentation and may prevent OOMs, by re-using memory more efficiently. Defaults to
# True if `sample_packing` enabled
pad_to_sequence_len: bool | None
-# Whether to use sequential sampling for curriculum learning
-curriculum_sampling: bool | None
-multipack_real_batches: bool | None
-
-# Use batch flattening for speedups when not using sample_packing
-batch_flattening: Literal['auto'] | bool | None
-
-use_pose: bool | None
-pose_split_on_token_ids: list[int] | None
-pose_max_context_len: int | None
-pose_num_chunks: int | None
-
-pretrain_multipack_buffer_size: int | None
-# whether to prevent cross attention for packed sequences during pretraining
-pretrain_multipack_attn: bool | None = True
-# whether to concatenate samples during pretraining
-pretraining_sample_concatenation: bool | None
-
-# Use streaming mode for loading datasets
-streaming: bool | None
-# Buffer size for multipack streaming datasets
-streaming_multipack_buffer_size: int | None = 10000
-
-# Whether to use xformers attention patch https://github.com/facebookresearch/xformers
-xformers_attention: bool | None
-# Whether to use scaled-dot-product attention https://pytorch.org/docs/stable/generated/
-# torch.nn.functional.scaled_dot_product_attention.html
-sdp_attention: bool | None
-# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf
-s2_attention: bool | None
-flex_attention: bool | None
-flex_attn_compile_kwargs: dict[str, Any] | None
-# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention
-flash_attention: bool | None
-# Whether to use flash-attention cross entropy implementation - advanced use only
-flash_attn_cross_entropy: bool | None
-# Whether to use flash-attention rms norm implementation - advanced use only
-flash_attn_rms_norm: bool | None
-# Whether to fuse part of the MLP into a single operation
-flash_attn_fuse_mlp: bool | None
-# Whether to use bettertransformers
-flash_optimum: bool | None
-# Whether to use SageAttention https://github.com/thu-ml/SageAttention
-sage_attention: bool | None
-
-eager_attention: bool | None
+# Pad each batch to a multiple of this value.
+pad_to_multiple_of: int | None
+# Whether to use sequential sampling for curriculum learning
+curriculum_sampling: bool | None
+multipack_real_batches: bool | None
+
+# Use batch flattening for speedups when not using sample_packing
+batch_flattening: Literal['auto'] | bool | None
+
+use_pose: bool | None
+pose_split_on_token_ids: list[int] | None
+pose_max_context_len: int | None
+pose_num_chunks: int | None
+
+pretrain_multipack_buffer_size: int | None
+# whether to prevent cross attention for packed sequences during pretraining
+pretrain_multipack_attn: bool | None = True
+# whether to concatenate samples during pretraining
+pretraining_sample_concatenation: bool | None
+
+# Use streaming mode for loading datasets
+streaming: bool | None
+# Buffer size for multipack streaming datasets
+streaming_multipack_buffer_size: int | None = 10000
+
+# Whether to use xformers attention patch https://github.com/facebookresearch/xformers
+xformers_attention: bool | None
+# Whether to use scaled-dot-product attention. See
+# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html
+sdp_attention: bool | None
+# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf
+s2_attention: bool | None
+flex_attention: bool | None
+flex_attn_compile_kwargs: dict[str, Any] | None
+# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention
+flash_attention: bool | None
+# Whether to use flash-attention cross entropy implementation - advanced use only
+flash_attn_cross_entropy: bool | None
+# Whether to use flash-attention rms norm implementation - advanced use only
+flash_attn_rms_norm: bool | None
+# Whether to fuse part of the MLP into a single operation
+flash_attn_fuse_mlp: bool | None
+# Whether to use bettertransformers
+flash_optimum: bool | None
+# Whether to use SageAttention https://github.com/thu-ml/SageAttention
+sage_attention: bool | None
-# Specify a custom attention implementation, used mostly for kernels.
-attn_implementation: str | None
-
-# Use hybrid attention for Gemma 4: flash_attention_2 for sliding window layers and sdpa
-# for global (full_attention) layers. Global layers have head_dim=512 which exceeds
-# flash attention's supported size.
-gemma4_hybrid_attn_impl: bool | None
-
-# Which experts implementation to use for MoE models,
-experts_implementation: str | None
-
-# Quantize MoE expert weights on load to reduce VRAM. Requires adapter (lora/qlora) with
-# load_in_4bit or load_in_8bit. Requires CUDA (not compatible with ROCm or other
-# backends). Note: total parameter count may be reported incorrectly when enabled
-# (trainable param count is correct).
-quantize_moe_experts: bool = False
-
-# Whether to use Scaled Softmax (SSMax) attention. Ref: https://arxiv.org/abs/2501.19399
-scaling_softmax: bool | None
-# Scaling factor for SSMax attention. Default is 0.43
-scaling_softmax_factor: float | None
-# Bias for SSMax attention. Default is 0.0. Note: The paper recommends bias=0 for better
-# length generalization.
-scaling_softmax_bias: float | None
-
-# Apply custom LoRA autograd functions and activation function Triton kernels for speed
-# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html
-lora_mlp_kernel: bool | None
-# Apply custom LoRA autograd functions and activation function Triton kernels for speed
-# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html
-lora_qkv_kernel: bool | None
-# Apply custom LoRA autograd functions and activation function Triton kernels for speed
-# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html
-lora_o_kernel: bool | None
-# Apply custom LoRA autograd function for embedding layers. See:
-# https://docs.axolotl.ai/docs/lora_optims.html
-lora_embedding_kernel: bool | None
-
-# Whether to use chunked cross entropy loss for memory efficiency
-chunked_cross_entropy: bool | None
-# Number of chunks to use for chunked cross entropy loss
-chunked_cross_entropy_num_chunks: int | None
-# Enable Entropy-Aware Focal Training loss (EAFT)
-use_eaft: bool | None
-# Exponent for entropy weighting in EAFT (default: 1.0)
-eaft_alpha: float | None = 1.0
-# Number of top logits for entropy approximation (default: 20)
-eaft_k: int | None = 20
-
-# Whether to use ALST tiled mlp for memory efficient long context
-tiled_mlp: bool | None
-
-# Number of shards to use for ALST tiled mlp. If unset, it will be set based on
-# seqlen/hidden_size
-tiled_mlp_num_shards: int | None
-
-# Whether to use original mlp for ALST tiled mlp. Otherwise uses a generic MLP based on
-# llama.
-tiled_mlp_use_original_mlp: bool | None = True
-
-llama4_linearized_experts: bool | None
+eager_attention: bool | None
+
+# Specify a custom attention implementation, used mostly for kernels.
+attn_implementation: str | None
+
+# Use hybrid attention for Gemma 4: flash_attention_2 for sliding window layers and sdpa
+# for global (full_attention) layers. Global layers have head_dim=512 which exceeds
+# flash attention's supported size.
+gemma4_hybrid_attn_impl: bool | None
+
+# Which experts implementation to use for MoE models.
+experts_implementation: str | None
+
+# Quantize MoE expert weights on load to reduce VRAM. Requires adapter (lora/qlora) with
+# load_in_4bit or load_in_8bit. Requires CUDA (not compatible with ROCm or other
+# backends). Note: total parameter count may be reported incorrectly when enabled
+# (trainable param count is correct).
+quantize_moe_experts: bool = False
+
+# Whether to use Scaled Softmax (SSMax) attention. Ref: https://arxiv.org/abs/2501.19399
+scaling_softmax: bool | None
+# Scaling factor for SSMax attention. Default is 0.43
+scaling_softmax_factor: float | None
+# Bias for SSMax attention. Default is 0.0. Note: The paper recommends bias=0 for better
+# length generalization.
+scaling_softmax_bias: float | None
+
+# Apply custom LoRA autograd functions and activation function Triton kernels for speed
+# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html
+lora_mlp_kernel: bool | None
+# Apply custom LoRA autograd functions and activation function Triton kernels for speed
+# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html
+lora_qkv_kernel: bool | None
+# Apply custom LoRA autograd functions and activation function Triton kernels for speed
+# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html
+lora_o_kernel: bool | None
+# Apply custom LoRA autograd function for embedding layers. See:
+# https://docs.axolotl.ai/docs/lora_optims.html
+lora_embedding_kernel: bool | None
+
+# Whether to use chunked cross entropy loss for memory efficiency
+chunked_cross_entropy: bool | None
+# Number of chunks to use for chunked cross entropy loss
+chunked_cross_entropy_num_chunks: int | None
+# Enable Entropy-Aware Focal Training loss (EAFT)
+use_eaft: bool | None
+# Exponent for entropy weighting in EAFT (default: 1.0)
+eaft_alpha: float | None = 1.0
+# Number of top logits for entropy approximation (default: 20)
+eaft_k: int | None = 20
+
+# Whether to use ALST tiled mlp for memory efficient long context
+tiled_mlp: bool | None
+
+# Number of shards to use for ALST tiled mlp. If unset, it will be set based on
+# seqlen/hidden_size
+tiled_mlp_num_shards: int | None
+
+# Whether to use original mlp for ALST tiled mlp. Otherwise uses a generic MLP based on
+# llama.
+tiled_mlp_use_original_mlp: bool | None = True
-# Deepspeed config path. e.g., deepspeed_configs/zero3.json
-deepspeed: str | dict[str, Any] | None
-# Whether to use deepcompile for faster training with deepspeed
-deepcompile: bool | None
-# FSDP configuration
-fsdp: list[str] | None
-
-# FSDP configuration options
-fsdp_config: FSDPConfig | None
- # For FSDPConfig:
- # FSDP version
-fsdp_version: int | None
- # Enable activation checkpointing to reduce memory usage during forward passes
-activation_checkpointing: bool | None
- # Offload parameters to CPU to reduce GPU memory usage
-offload_params: bool | None
- # Synchronize module states across all processes
-sync_module_states: bool | None
- # Enable CPU RAM efficient loading to reduce memory usage during model loading
-cpu_ram_efficient_loading: bool | None
- # Disabling this enables swap memory usage for resource-constrained setups when
- # offload_params is enabled.
-cpu_offload_pin_memory: bool | None
- # Use original parameters instead of flattened parameters
-use_orig_params: bool | None
-
- # Type of state dict to use for saving/loading checkpoints
-state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None
- # Final state dict type to use after training completion
-final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None
-
- # Policy for automatically wrapping modules with FSDP
-auto_wrap_policy: Literal['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP'] | None
- # Class name of transformer layers to wrap (e.g., 'LlamaDecoderLayer')
-transformer_layer_cls_to_wrap: str | None
-
- # Reshard parameters after forward pass to save memory
-reshard_after_forward: bool | None
- # Mixed precision policy for FSDP (e.g., 'fp16', 'bf16')
-mixed_precision_policy: str | None
-
-# FSDP version
-fsdp_version: int | None
-fsdp_final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None
-
-# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for
-# no eval.
-val_set_size: float | None = 0.0
-
-# Number of devices to shard across. If not set, will use all available devices.
-dp_shard_size: int | None
-# Number of devices to replicate across.
-dp_replicate_size: int | None
-# Deprecated: use `context_parallel_size` instead
-sequence_parallel_degree: int | None
-# Set to a divisor of the number of GPUs available to split sequences into chunks of
-# equal size. Use in long context training to prevent OOM when sequences cannot fit into
-# a single GPU's VRAM. E.g., if 4 GPUs are available, set this value to 2 to split each
-# sequence into two equal-sized subsequences, or set to 4 to split into four equal-sized
-# subsequences. See https://docs.axolotl.ai/docs/sequence_parallelism.html for more
-# details.
-context_parallel_size: int | None
-# Optional; strides across the key dimension. Larger values use more memory but should
-# make training faster. Must evenly divide the number of KV heads in your model.
-heads_k_stride: int | None
-# One of 'varlen_llama3', 'batch_ring', 'batch_zigzag', 'batch_stripe'. Defaults to
-# 'varlen_llama3' in the sample packing case, and 'batch_ring' in the non-sample packing
-# case.
-ring_attn_func: RingAttnFunc | None
-# Number of tensor parallel processes in TP group. Only supported with DeepSpeed AutoTP.
-tensor_parallel_size: int | None
-
-# Add or change special tokens. If you add tokens here, you don't need to add them to
-# the `tokens` list.
-special_tokens: SpecialTokensConfig | None
- # For SpecialTokensConfig:
-bos_token: str | None
-eos_token: str | None
-pad_token: str | None
-unk_token: str | None
-additional_special_tokens: list[str] | None
-
-# Add extra tokens to the tokenizer
-tokens: list[str] | None
-# Mapping token_id to new_token_string to override reserved added_tokens in the
-# tokenizer. Only works for tokens that are not part of the base vocab (aka are
-# added_tokens). Can be checked if they exist in tokenizer.json added_tokens.
-added_tokens_overrides: dict[int, str] | None
-
-# Whether to use torch.compile and which backend to use. setting to `auto` will enable
-# torch compile when torch>=2.6.0
-torch_compile: Literal['auto'] | bool | None
-# Backend to use for torch.compile
-torch_compile_backend: str | None
-torch_compile_mode: Literal['default', 'reduce-overhead', 'max-autotune'] | None
-
-# Maximum number of iterations to train for. It precedes num_epochs which means that if
-# both are set, num_epochs will not be guaranteed. e.g., when 1 epoch is 1000 steps =>
-# `num_epochs: 2` and `max_steps: 100` will train for 100 steps
-max_steps: int | None
-# Number of warmup steps. Cannot use with warmup_ratio
-warmup_steps: int | None
-# Warmup ratio. Cannot use with warmup_steps
-warmup_ratio: float | None
-# Leave empty to eval at each epoch, integer for every N steps. float for fraction of
-# total steps
-eval_steps: int | float | None
-# Number of times per epoch to run evals, mutually exclusive with eval_steps
-evals_per_epoch: int | None
-# Set to `no` to skip evaluation, `epoch` at end of each epoch, leave empty to infer
-# from `eval_steps`
-eval_strategy: str | None
-
-# Leave empty to save at each epoch, integer for every N steps. float for fraction of
-# total steps
-save_steps: int | float | None
-# Number of times per epoch to save a checkpoint, mutually exclusive with save_steps
-saves_per_epoch: int | None
-# Set to `no` to skip checkpoint saves, `epoch` at end of each epoch, `best` when better
-# result is achieved, leave empty to infer from `save_steps`
-save_strategy: str | None
-# Checkpoints saved at a time
-save_total_limit: int | None
-# Whether to checkpoint a model after the first step of training. Defaults to False.
-save_first_step: bool | None
-
-# Logging frequency
-logging_steps: int | None
-# Stop training after this many evaluation losses have increased in a row. https://huggi
-# ngface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppin
-# gCallback
-early_stopping_patience: int | None
-load_best_model_at_end: bool | None = False
-# Save only the model weights, skipping the optimizer. Using this means you can't resume
-# from checkpoints.
-save_only_model: bool | None = False
-# Use tensorboard for logging
-use_tensorboard: bool | None
-# Enable the pytorch profiler to capture the first N steps of training to the
-# output_dir. see https://pytorch.org/blog/understanding-gpu-memory-1/ for more
-# information. Snapshots can be visualized @ https://pytorch.org/memory_viz
-profiler_steps: int | None
-# Which step to start the profiler at. Useful for only capturing a few steps mid-run.
-profiler_steps_start: int | None = 0
-# bool of whether to report tokens per second at the end of training. This is not
-# supported with pre-training datasets.
-include_tokens_per_second: bool | None
-# bool of whether to report tokens per second per-gpu during training by measuring
-# throughput of non-padding tokens.
-include_tkps: bool | None = True
-# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to
-# add noise to embeddings. Currently only supported on Llama and Mistral
-neftune_noise_alpha: float | None
-
-# Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to
-# `beta` in `ORPOConfig` due to trl mapping.
-orpo_alpha: float | None
-# Target reward margin for the SimPO loss
-simpo_gamma: float | None
-# Weight of the BC regularizer
-cpo_alpha: float | None
-
-# Factor for desirable loss term in KTO loss
-kto_desirable_weight: float | None
-# Factor for undesirable loss term in KTO loss
-kto_undesirable_weight: float | None
-# The beta parameter for the RL training
-rl_beta: float | None
-
-# Defines the max memory usage per gpu on the system. Passed through to transformers
-# when loading the model.
-max_memory: dict[int | Literal['cpu', 'disk'], int | str] | None
-# Limit the memory for all available GPUs to this amount (if an integer, expressed in
-# gigabytes); default: unset
-gpu_memory_limit: int | str | None
-# Whether to use low_cpu_mem_usage
-low_cpu_mem_usage: bool | None
-
-# The name of the chat template to use for training, following values are supported:
-# tokenizer_default: Uses the chat template that is available in the
-# tokenizer_config.json. If the chat template is not available in the tokenizer, it will
-# raise an error. This is the default value.
-# alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates
-# are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.
-# tokenizer_default_fallback_*: where * is the name of the chat template to fallback to.
-# E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not
-# available in the tokenizer. jinja: Uses a custom jinja template for the chat template.
-# The custom jinja template should be provided in the chat_template_jinja field. The
-# selected chat template will be saved to the tokenizer_config.json for easier
-# inferencing
-chat_template: ChatTemplate | Annotated[str, StringConstraints(pattern='^tokenizer_default_fallback_')] | None
-# Custom jinja template or path to jinja file for chat template. This will be only used
-# if chat_template is set to `jinja` or `null` (in which case chat_template is
-# automatically set to `jinja`). Default is null.
-chat_template_jinja: str | None
-# Additional kwargs to pass to the chat template. This is useful for customizing the
-# chat template. For example, you can pass `thinking=False` to add a generation prompt
-# to the chat template.
-chat_template_kwargs: dict[str, Any] | None
-# Custom EOT (End-of-Turn) tokens to mask/unmask during training. These tokens mark the
-# boundaries between conversation turns. For example: ['/INST', '</s>',
-# '[/SYSTEM_PROMPT]']. If not specified, defaults to just the model's eos_token. This is
-# useful for templates that use multiple delimiter tokens.
-eot_tokens: list[str] | None
-# Changes the default system message. Currently only supports chatml.
-default_system_message: str | None
-
-# Token index or indices to adjust embedding weights to the mean of the other tokens.
-# This is useful when the model has untrained embeddings.
-fix_untrained_tokens: int | list[int] | None
-
-is_preprocess: bool | None
-preprocess_iterable: bool | None
-
-# Total number of tokens - internal use
-total_num_tokens: int | None
-total_supervised_tokens: int | None
-# You can set these packing optimizations AFTER starting a training at least once. The
-# trainer will provide recommended values for these values.
-sample_packing_eff_est: float | None
-axolotl_config_path: str | None
-
-# Internal use only - Used to identify which the model is based on
-is_falcon_derived_model: bool | None
-# Internal use only - Used to identify which the model is based on
-is_llama_derived_model: bool | None
-# Internal use only - Used to identify which the model is based on. Please note that if
-# you set this to true, `padding_side` will be set to 'left' by default
-is_mistral_derived_model: bool | None
-# Internal use only - Used to identify which the model is based on
-is_qwen_derived_model: bool | None
-
-# Add plugins to extend the pipeline. See `src/axolotl/integrations` for the available
-# plugins or doc below for more details.
-# https://docs.axolotl.ai/docs/custom_integrations.html
-plugins: list[str] | None
-# Enable sample generation during training for monitoring
-generate_samples: bool | None = False
-# Number of samples to generate at each interval
-num_generation_samples: int | None = 3
-# Maximum new tokens to generate per sample
-generation_max_new_tokens: int | None = 50
-# Temperature for sample generation (0.0 = greedy)
-generation_temperature: float | None = 0.7
-# Nucleus sampling parameter for generation
-generation_top_p: float | None
-# Top-k sampling parameter for generation
-generation_top_k: int | None
-# Ratio of input to use as prompt (0.0-1.0)
-generation_prompt_ratio: float | None = 0.5
-# Whether to use sampling (vs greedy decoding)
-generation_do_sample: bool | None = True
-
-# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files. This
-# can also be a relative path to a model on disk
-base_model: str (required)
-# If the base_model repo on hf hub doesn't include configuration .json files, You can
-# set that here, or leave this empty to default to base_model
-base_model_config: str | None
-# transformers config class (e.g., 'LlamaConfig', 'MistralConfig'). Defaults to
-# AutoConfig.
-cls_model_config: str | None
-# Optional tokenizer configuration path in case you want to use a different tokenizer
-# than the one defined in the base model
-tokenizer_config: str | None
-# use_fast option for tokenizer loading from_pretrained, default to True
-tokenizer_use_fast: bool | None
-# Whether to use the legacy tokenizer setting, defaults to True
-tokenizer_legacy: bool | None
-# Whether to use mistral-common tokenizer. If set to True, it will use the mistral-
-# common tokenizer.
-tokenizer_use_mistral_common: bool | None
-# Corresponding tokenizer for the model AutoTokenizer is a good choice
-tokenizer_type: str | None
-# transformers processor class
-processor_type: str | None
-# kwargs forwarded to the processor's from_pretrained(), overriding processor config
-# (e.g. image_seq_length, min_pixels, etc.).
-processor_kwargs: dict[str, Any] | None
-# Whether to save jinja files for tokenizer, transformers default is True
-tokenizer_save_jinja_files: bool | None = True
-# Trust remote code for untrusted source
-trust_remote_code: bool | None
-
-# Don't move the model to the device before sharding. Set to `false` to revert to legacy
-# behavior.
-experimental_skip_move_to_device: bool | None = True
-
-# Use custom kernels, e.g. MegaBlocks.
-use_kernels: bool | None
-
-# Model loading quantization config
-model_quantization_config: Literal['Mxfp4Config', 'FineGrainedFP8Config'] | None
-# kwargs for model quantization config
-model_quantization_config_kwargs: dict[str, Any] | None
-
-# Where to save the full-finetuned model to
-output_dir: str = ./model-out
-# push checkpoints to hub
-hub_model_id: str | None
-# how to push checkpoints to hub
-hub_strategy: str | None
-# branch/revision to push to on hub (default: main)
-hub_revision: str | None
-# Whether to save the model using safetensors format. Defaults to True.
-save_safetensors: bool | None = True
-
-# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer
-load_in_8bit: bool | None = False
-# Use bitsandbytes 4 bit
-load_in_4bit: bool | None = False
-
-# If you want to use 'lora', 'qlora', or 'llama-adapter', or leave blank to train all
-# parameters in original model
-adapter: Literal['lora', 'qlora', 'llama-adapter'] | None
-# If you already have a lora model trained that you want to load, put that here. This
-# means after training, if you want to test the model, you should set this to the value
-# of `output_dir`. Note that if you merge an adapter to the base model, a new
-# subdirectory `merged` will be created under the `output_dir`.
-lora_model_dir: str | None
-lora_r: int | None
-lora_alpha: int | None
-lora_fan_in_fan_out: bool | None
-lora_target_modules: str | list[str] | None
-lora_target_parameters: str | list[str] | None
-# If true, will target all linear modules
-lora_target_linear: bool | None
-# If you added new tokens to the tokenizer, you may need to save some LoRA modules
-# because they need to know the new tokens. For LLaMA and Mistral, you need to save
-# `embed_tokens` and `lm_head`. It may vary for other models. `embed_tokens` converts
-# tokens to embeddings, and `lm_head` converts embeddings to token probabilities.
-lora_modules_to_save: list[str] | None
-lora_dropout: float | None = 0.0
-# The layer indices to transform, otherwise, apply to all layers
-peft_layers_to_transform: list[int] | None
-peft_layers_pattern: list[str] | None
-
-peft: PeftConfig | None
- # For PeftConfig:
- # Configuration options for loftq initialization for LoRA
-loftq_config: LoftQConfig | None
- # For LoftQConfig:
- # typically 4 bits
-loftq_bits: int = 4
-
-# Whether to use DoRA.
-peft_use_dora: bool | None
-# Whether to use RSLoRA.
-peft_use_rslora: bool | None
-# List of layer indices to replicate.
-peft_layer_replication: list[tuple[int, int]] | None
-# How to initialize LoRA weights. Default to True which is MS original implementation.
-peft_init_lora_weights: bool | str | None
-# A list of token indices to fine-tune on the `embed_tokens` layer. Otherwise, a dict
-# mapping an embedding layer name to its trainable token indices. See
-# https://huggingface.co/docs/peft/v0.17.0/en/developer_guides/lora#efficiently-train-
-# tokens-alongside-lora
-peft_trainable_token_indices: list[int] | dict[str, list[int]] | None
-# Whether to tie adapter weights for tied model weights. See
-# https://github.com/huggingface/peft/issues/2864
-peft_ensure_weight_tying: bool | None
-# Whether to upcast the LoRA adapter to fp32. This is enabled by default in PEFT.
-peft_autocast_adapter_dtype: bool | None
-
-# load qlora model in sharded format for FSDP using answer.ai technique.
-qlora_sharded_model_loading: bool | None = False
-# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it
-# takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge
-lora_on_cpu: bool | None
-# Whether you are training a 4-bit GPTQ quantized model
-gptq: bool | None
-# optional overrides to the bnb 4bit quantization configuration
-bnb_config_kwargs: dict[str, Any] | None
-
-# loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.
-loraplus_lr_ratio: float | None
-# loraplus learning rate for lora embedding layers. Default value is 1e-6.
-loraplus_lr_embedding: float | None = 1e-06
-
-merge_lora: bool | None
-# Method to use for LoRA merging. 'memory_efficient' (default) processes shards
-# individually to reduce memory usage, 'legacy' loads the full model into memory.
-merge_method: Literal['legacy', 'memory_efficient'] | None = memory_efficient
-
-# Whether to use ReLoRA. Use with jagged_restart_*steps options.
-relora: bool | None
-# threshold for optimizer magnitude when pruning
-relora_prune_ratio: float | None
-# True to perform lora weight merges on cpu during restarts, for modest gpu memory
-# savings
-relora_cpu_offload: bool | None
-
-# how often to reset for jagged restarts
-jagged_restart_steps: int | None
-# how many warmup steps to take after reset for jagged restarts
-jagged_restart_warmup_steps: int | None
-# how many anneal steps to take before reset for jagged restarts
-jagged_restart_anneal_steps: int | None
-
-# If greater than 1, backpropagation will be skipped and the gradients will be
-# accumulated for the given number of steps.
-gradient_accumulation_steps: int | None = 1
-# The number of samples to include in each batch. This is the number of samples sent to
-# each GPU. Batch size per gpu = micro_batch_size * gradient_accumulation_steps
-micro_batch_size: int | None = 1
-# Total batch size, we do not recommended setting this manually
-batch_size: int | None
-# per gpu micro batch size for evals, defaults to value of micro_batch_size
-eval_batch_size: int | None
-
-# whether to find batch size that fits in memory. Passed to underlying transformers
-# Trainer
-auto_find_batch_size: bool | None
-
-# Whether to mask out or include the human's prompt from the training labels
-train_on_inputs: bool | None = False
-# Group similarly sized data to minimize padding. May be slower to start, as it must
-# download and sort the entire dataset. Note that training loss may have an oscillating
-# pattern with this enabled.
-group_by_length: bool | None
-
-learning_rate: str | float (required)
-embedding_lr: float | None
-embedding_lr_scale: float | None
-# Specify weight decay
-weight_decay: float | None = 0.0
-# Specify optimizer
-optimizer: OptimizerNames | CustomSupportedOptimizers | None = OptimizerNames.ADAMW_TORCH_FUSED
-# Dictionary of arguments to pass to the optimizer
-optim_args: str | dict[str, Any] | None
-# The target modules to optimize, i.e. the module names that you would like to train,
-# right now this is used only for GaLore algorithm
-optim_target_modules: list[str] | Literal['all_linear'] | None
-# Path to torch distx for optim 'adamw_anyprecision'
-torchdistx_path: str | None
-lr_scheduler: SchedulerType | Literal['one_cycle'] | Literal['rex'] | None = SchedulerType.COSINE
-# Specify a scheduler and kwargs to use with the optimizer
-lr_scheduler_kwargs: dict[str, Any] | None
-lr_quadratic_warmup: bool | None
-# decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of
-# peak lr
-cosine_min_lr_ratio: float | None
-# freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means
-# start cosine_min_lr at 80% of training step
-cosine_constant_lr_ratio: float | None
-# Learning rate div factor
-lr_div_factor: float | None
-
-lr_groups: list[LrGroup] | None
- # For LrGroup:
-name: str (required)
-modules: list[str] (required)
-lr: float (required)
-
-# adamw hyperparams
-adam_epsilon: float | None
-# only used for CAME Optimizer
-adam_epsilon2: float | None
-# adamw hyperparams
-adam_beta1: float | None
-# adamw hyperparams
-adam_beta2: float | None
-# only used for CAME Optimizer
-adam_beta3: float | None
-
-# Dion Optimizer learning rate
-dion_lr: float | None
-# Dion Optimizer momentum
-dion_momentum: float | None
-# Dion Optimizer: r/d fraction for low-rank approximation. Used to compute the low-rank
-# dimension.
-dion_rank_fraction: float | None = 1.0
-# Dion Optimizer: Round up the low-rank dimension to a multiple of this number. This may
-# be useful to ensure even sharding.
-dion_rank_multiple_of: int | None = 1
-
-# Gradient clipping max norm
-max_grad_norm: float | None
-num_epochs: float = 1.0
-
-use_wandb: bool | None
-# Set the name of your wandb run
-wandb_name: str | None
-# Set the ID of your wandb run
-wandb_run_id: str | None
-# "offline" to save run metadata locally and not sync to the server, "disabled" to turn
-# off wandb
-wandb_mode: str | None
-# Your wandb project name
-wandb_project: str | None
-# A wandb Team name if using a Team
-wandb_entity: str | None
-wandb_watch: str | None
-# "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only
-# at the end of training
-wandb_log_model: str | None
-
-use_mlflow: bool | None
-# URI to mlflow
-mlflow_tracking_uri: str | None
-# Your experiment name
-mlflow_experiment_name: str | None
-# Your run name
-mlflow_run_name: str | None
-# set to true to copy each saved checkpoint on each save to mlflow artifact registry
-hf_mlflow_log_artifacts: bool | None
-
-# Enable or disable Comet integration.
-use_comet: bool | None
-# API key for Comet. Recommended to set via `comet login`.
-comet_api_key: str | None
-# Workspace name in Comet. Defaults to the user's default workspace.
-comet_workspace: str | None
-# Project name in Comet. Defaults to Uncategorized.
-comet_project_name: str | None
-# Identifier for the experiment. Used to append data to an existing experiment or
-# control the key of new experiments. Default to a random key.
-comet_experiment_key: str | None
-# Create a new experiment ("create") or log to an existing one ("get"). Default
-# ("get_or_create") auto-selects based on configuration.
-comet_mode: str | None
-# Set to True to log data to Comet server, or False for offline storage. Default is
-# True.
-comet_online: bool | None
-# Dictionary for additional configuration settings, see the doc for more details.
-comet_experiment_config: dict[str, Any] | None
-
-use_trackio: bool | None
-# Your trackio project name
-trackio_project_name: str | None
-# Set the name of your trackio run
-trackio_run_name: str | None
-# Hugging Face Space ID to sync dashboard to (optional, runs locally if not provided)
-trackio_space_id: str | None
-
-# Enable OpenTelemetry metrics collection and Prometheus export
-use_otel_metrics: bool | None = False
-# Host to bind the OpenTelemetry metrics server to
-otel_metrics_host: str | None = localhost
-# Port for the Prometheus metrics HTTP server
-otel_metrics_port: int | None = 8000
-
-# the number of activate layers in LISA
-lisa_n_layers: int | None
-# how often to switch layers in LISA
-lisa_step_interval: int | None
-# path under the model to access the layers
-lisa_layers_attribute: str | None = model.layers
-
-gradio_title: str | None
-gradio_share: bool | None
-gradio_server_name: str | None
-gradio_server_port: int | None
-gradio_max_new_tokens: int | None
-gradio_temperature: float | None
-
-use_ray: bool = False
-ray_run_name: str | None
-ray_num_workers: int = 1
-resources_per_worker: dict
-
-# The size of the image to resize to. It can be an integer (resized into padded-square
-# image) or a tuple (width, height).If not provided, we will attempt to load from
-# preprocessor.size, otherwise, images won't be resized.
-image_size: int | tuple[int, int] | None
-# The resampling algorithm to use for image resizing. Default is bilinear. Please refer
-# to PIL.Image.Resampling for more details.
-image_resize_algorithm: Literal['bilinear', 'bicubic', 'lanczos'] | Resampling | None
-
-# optional overrides to the base model configuration
-overrides_of_model_config: dict[str, Any] | None
-# optional overrides the base model loading from_pretrained
-overrides_of_model_kwargs: dict[str, Any] | None
-# If you want to specify the type of model to load, AutoModelForCausalLM is a good
-# choice too
-type_of_model: str | None
-# You can specify to choose a specific model revision from huggingface hub
-revision_of_model: str | None
-
-max_packed_sequence_len: int | None
-rope_scaling: Any | None
-noisy_embedding_alpha: float | None
-dpo_beta: float | None
-evaluation_strategy: str | None
-eval_table_size: int | None
-eval_max_new_tokens: int | None
-dpo_use_logits_to_keep: bool | None
-dpo_generate_during_eval: bool | None
-dpo_norm_loss: bool | None
-rpo_alpha: float | None
+llama4_linearized_experts: bool | None
+
+# Deepspeed config path. e.g., deepspeed_configs/zero3.json
+deepspeed: str | dict[str, Any] | None
+# Whether to use deepcompile for faster training with deepspeed
+deepcompile: bool | None
+# FSDP configuration
+fsdp: list[str] | None
+
+# FSDP configuration options
+fsdp_config: FSDPConfig | None
+ # For FSDPConfig:
+ # FSDP version
+fsdp_version: int | None
+ # Enable activation checkpointing to reduce memory usage during forward passes
+activation_checkpointing: bool | None
+ # Offload parameters to CPU to reduce GPU memory usage
+offload_params: bool | None
+ # Synchronize module states across all processes
+sync_module_states: bool | None
+ # Enable CPU RAM efficient loading to reduce memory usage during model loading
+cpu_ram_efficient_loading: bool | None
+ # Disabling this enables swap memory usage for resource-constrained setups when
+ # offload_params is enabled.
+cpu_offload_pin_memory: bool | None
+ # Use original parameters instead of flattened parameters
+use_orig_params: bool | None
+
+ # Type of state dict to use for saving/loading checkpoints
+state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None
+ # Final state dict type to use after training completion
+final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None
+
+ # Policy for automatically wrapping modules with FSDP
+auto_wrap_policy: Literal['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP'] | None
+ # Class name of transformer layers to wrap (e.g., 'LlamaDecoderLayer')
+transformer_layer_cls_to_wrap: str | None
+
+ # Reshard parameters after forward pass to save memory
+reshard_after_forward: bool | None
+ # Mixed precision policy for FSDP (e.g., 'fp16', 'bf16')
+mixed_precision_policy: str | None
+
+# FSDP version
+fsdp_version: int | None
+fsdp_final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None
+
+# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for
+# no eval.
+val_set_size: float | None = 0.0
+
+# Number of devices to shard across. If not set, will use all available devices.
+dp_shard_size: int | None
+# Number of devices to replicate across.
+dp_replicate_size: int | None
+# Deprecated: use `context_parallel_size` instead
+sequence_parallel_degree: int | None
+# Set to a divisor of the number of GPUs available to split sequences into chunks of
+# equal size. Use in long context training to prevent OOM when sequences cannot fit into
+# a single GPU's VRAM. E.g., if 4 GPUs are available, set this value to 2 to split each
+# sequence into two equal-sized subsequences, or set to 4 to split into four equal-sized
+# subsequences. See https://docs.axolotl.ai/docs/sequence_parallelism.html for more
+# details.
+context_parallel_size: int | None
+# Optional; strides across the key dimension. Larger values use more memory but should
+# make training faster. Must evenly divide the number of KV heads in your model.
+heads_k_stride: int | None
+# One of 'varlen_llama3', 'batch_ring', 'batch_zigzag', 'batch_stripe'. Defaults to
+# 'varlen_llama3' in the sample packing case, and 'batch_ring' in the non-sample packing
+# case.
+ring_attn_func: RingAttnFunc | None
+# Number of tensor parallel processes in TP group. Only supported with DeepSpeed AutoTP.
+tensor_parallel_size: int | None
+
+# Add or change special tokens. If you add tokens here, you don't need to add them to
+# the `tokens` list.
+special_tokens: SpecialTokensConfig | None
+ # For SpecialTokensConfig:
+bos_token: str | None
+eos_token: str | None
+pad_token: str | None
+unk_token: str | None
+additional_special_tokens: list[str] | None
+
+# Add extra tokens to the tokenizer
+tokens: list[str] | None
+# Mapping of token_id to new_token_string to override reserved added_tokens in the
+# tokenizer. Only works for tokens that are not part of the base vocab (i.e. are
+# added_tokens). You can check whether they exist under added_tokens in tokenizer.json.
+added_tokens_overrides: dict[int, str] | None
+
+# Whether to use torch.compile and which backend to use.
+torch_compile: Literal['auto'] | bool | None
+# Backend to use for torch.compile
+torch_compile_backend: str | None
+torch_compile_mode: Literal['default', 'reduce-overhead', 'max-autotune'] | None
+
+# Maximum number of iterations to train for. It takes precedence over num_epochs: if
+# both are set, num_epochs is not guaranteed. e.g., when 1 epoch is 1000 steps =>
+# `num_epochs: 2` and `max_steps: 100` will train for 100 steps
+max_steps: int | None
+# Number of warmup steps. Cannot use with warmup_ratio
+warmup_steps: int | None
+# Warmup ratio. Cannot use with warmup_steps
+warmup_ratio: float | None
+# Leave empty to eval at each epoch, an integer for every N steps, or a float for a
+# fraction of total steps
+eval_steps: int | float | None
+# Number of times per epoch to run evals, mutually exclusive with eval_steps
+evals_per_epoch: int | None
+# Set to `no` to skip evaluation, `epoch` at end of each epoch, leave empty to infer
+# from `eval_steps`
+eval_strategy: str | None
+
+# Leave empty to save at each epoch, an integer for every N steps, or a float for a
+# fraction of total steps
+save_steps: int | float | None
+# Number of times per epoch to save a checkpoint, mutually exclusive with save_steps
+saves_per_epoch: int | None
+# Set to `no` to skip checkpoint saves, `epoch` at end of each epoch, `best` when better
+# result is achieved, leave empty to infer from `save_steps`
+save_strategy: str | None
+# Maximum number of checkpoints to keep at a time
+save_total_limit: int | None
+# Whether to checkpoint a model after the first step of training. Defaults to False.
+save_first_step: bool | None
+
+# Logging frequency
+logging_steps: int | None
+# Stop training after this many evaluation losses have increased in a row. See
+# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback
+early_stopping_patience: int | None
+load_best_model_at_end: bool | None = False
+# Save only the model weights, skipping the optimizer. Using this means you can't resume
+# from checkpoints.
+save_only_model: bool | None = False
+# Use tensorboard for logging
+use_tensorboard: bool | None
+# Enable the pytorch profiler to capture the first N steps of training to the
+# output_dir. See https://pytorch.org/blog/understanding-gpu-memory-1/ for more
+# information. Snapshots can be visualized at https://pytorch.org/memory_viz
+profiler_steps: int | None
+# Which step to start the profiler at. Useful for only capturing a few steps mid-run.
+profiler_steps_start: int | None = 0
+# Whether to report tokens per second at the end of training. This is not
+# supported with pre-training datasets.
+include_tokens_per_second: bool | None
+# Whether to report tokens per second per GPU during training by measuring
+# throughput of non-padding tokens.
+include_tkps: bool | None = True
+# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to
+# add noise to embeddings. Currently only supported on Llama and Mistral
+neftune_noise_alpha: float | None
+
+# Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to
+# `beta` in `ORPOConfig` due to trl mapping.
+orpo_alpha: float | None
+# Target reward margin for the SimPO loss
+simpo_gamma: float | None
+# Weight of the BC regularizer
+cpo_alpha: float | None
+
+# Factor for desirable loss term in KTO loss
+kto_desirable_weight: float | None
+# Factor for undesirable loss term in KTO loss
+kto_undesirable_weight: float | None
+# The beta parameter for the RL training
+rl_beta: float | None
+
+# Defines the max memory usage per gpu on the system. Passed through to transformers
+# when loading the model.
+max_memory: dict[int | Literal['cpu', 'disk'], int | str] | None
+# Limit the memory for all available GPUs to this amount (if an integer, expressed in
+# gigabytes); default: unset
+gpu_memory_limit: int | str | None
+# Whether to use low_cpu_mem_usage
+low_cpu_mem_usage: bool | None
+
+# The name of the chat template to use for training, following values are supported:
+# tokenizer_default: Uses the chat template that is available in the
+# tokenizer_config.json. If the chat template is not available in the tokenizer, it will
+# raise an error. This is the default value.
+# alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates
+# are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.
+# tokenizer_default_fallback_*: where * is the name of the chat template to fallback to.
+# E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not
+# available in the tokenizer. jinja: Uses a custom jinja template for the chat template.
+# The custom jinja template should be provided in the chat_template_jinja field. The
+# selected chat template will be saved to the tokenizer_config.json for easier
+# inference.
+chat_template: ChatTemplate | Annotated[str, StringConstraints(pattern='^tokenizer_default_fallback_')] | None
+# Custom jinja template or path to jinja file for chat template. This will only be used
+# if chat_template is set to `jinja` or `null` (in which case chat_template is
+# automatically set to `jinja`). Default is null.
+chat_template_jinja: str | None
+# Additional kwargs to pass to the chat template. This is useful for customizing the
+# chat template. For example, you can pass `thinking=False` to add a generation prompt
+# to the chat template.
+chat_template_kwargs: dict[str, Any] | None
+# Custom EOT (End-of-Turn) tokens to mask/unmask during training. These tokens mark the
+# boundaries between conversation turns. For example: ['/INST', '</s>',
+# '[/SYSTEM_PROMPT]']. If not specified, defaults to just the model's eos_token. This is
+# useful for templates that use multiple delimiter tokens.
+eot_tokens: list[str] | None
+# Changes the default system message. Currently only supports chatml.
+default_system_message: str | None
+
+# Token index or indices to adjust embedding weights to the mean of the other tokens.
+# This is useful when the model has untrained embeddings.
+fix_untrained_tokens: int | list[int] | None
+
+is_preprocess: bool | None
+preprocess_iterable: bool | None
+
+# Total number of tokens - internal use
+total_num_tokens: int | None
+total_supervised_tokens: int | None
+# You can set these packing optimizations AFTER starting a training run at least once.
+# The trainer will provide recommended values for these settings.
+sample_packing_eff_est: float | None
+axolotl_config_path: str | None
+
+# Internal use only - Used to identify which model family the model is based on
+is_falcon_derived_model: bool | None
+# Internal use only - Used to identify which model family the model is based on
+is_llama_derived_model: bool | None
+# Internal use only - Used to identify which model family the model is based on. Please
+# note that if you set this to true, `padding_side` will be set to 'left' by default
+is_mistral_derived_model: bool | None
+# Internal use only - Used to identify which model family the model is based on
+is_qwen_derived_model: bool | None
+
+# Add plugins to extend the pipeline. See `src/axolotl/integrations` for the available
+# plugins, or the doc below for more details.
+# https://docs.axolotl.ai/docs/custom_integrations.html
+plugins: list[str] | None
+# Enable sample generation during training for monitoring
+generate_samples: bool | None = False
+# Number of samples to generate at each interval
+num_generation_samples: int | None = 3
+# Maximum new tokens to generate per sample
+generation_max_new_tokens: int | None = 50
+# Temperature for sample generation (0.0 = greedy)
+generation_temperature: float | None = 0.7
+# Nucleus sampling parameter for generation
+generation_top_p: float | None
+# Top-k sampling parameter for generation
+generation_top_k: int | None
+# Ratio of input to use as prompt (0.0-1.0)
+generation_prompt_ratio: float | None = 0.5
+# Whether to use sampling (vs greedy decoding)
+generation_do_sample: bool | None = True
+
+# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files. This
+# can also be a relative path to a model on disk
+base_model: str (required)
+# If the base_model repo on hf hub doesn't include configuration .json files, you can
+# set that here, or leave this empty to default to base_model
+base_model_config: str | None
+# transformers config class (e.g., 'LlamaConfig', 'MistralConfig'). Defaults to
+# AutoConfig.
+cls_model_config: str | None
+# Optional tokenizer configuration path in case you want to use a different tokenizer
+# than the one defined in the base model
+tokenizer_config: str | None
+# use_fast option for tokenizer loading from_pretrained, defaults to True
+tokenizer_use_fast: bool | None
+# Whether to use the legacy tokenizer setting, defaults to True
+tokenizer_legacy: bool | None
+# Whether to use the mistral-common tokenizer.
+tokenizer_use_mistral_common: bool | None
+# Corresponding tokenizer for the model. AutoTokenizer is a good choice
+tokenizer_type: str | None
+# transformers processor class
+processor_type: str | None
+# kwargs forwarded to the processor's from_pretrained(), overriding processor config
+# (e.g. image_seq_length, min_pixels, etc.).
+processor_kwargs: dict[str, Any] | None
+# Whether to save jinja files for tokenizer, transformers default is True
+tokenizer_save_jinja_files: bool | None = True
+# Trust remote code for untrusted source
+trust_remote_code: bool | None
+
+# Don't move the model to the device before sharding. Set to `false` to revert to legacy
+# behavior.
+experimental_skip_move_to_device: bool | None = True
+
+# Use custom kernels, e.g. MegaBlocks.
+use_kernels: bool | None
+
+# Model loading quantization config
+model_quantization_config: Literal['Mxfp4Config', 'FineGrainedFP8Config'] | None
+# kwargs for model quantization config
+model_quantization_config_kwargs: dict[str, Any] | None
+
+# Where to save the full-finetuned model to
+output_dir: str = ./model-out
+# push checkpoints to hub
+hub_model_id: str | None
+# how to push checkpoints to hub
+hub_strategy: str | None
+# branch/revision to push to on hub (default: main)
+hub_revision: str | None
+# Whether to save the model using safetensors format. Defaults to True.
+save_safetensors: bool | None = True
+
+# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer
+load_in_8bit: bool | None = False
+# Use bitsandbytes 4 bit
+load_in_4bit: bool | None = False
+
+# If you want to use 'lora', 'qlora', or 'llama-adapter', or leave blank to train all
+# parameters in original model
+adapter: Literal['lora', 'qlora', 'llama-adapter'] | None
+# If you already have a lora model trained that you want to load, put that here. This
+# means after training, if you want to test the model, you should set this to the value
+# of `output_dir`. Note that if you merge an adapter to the base model, a new
+# subdirectory `merged` will be created under the `output_dir`.
+lora_model_dir: str | None
+lora_r: int | None
+lora_alpha: int | None
+lora_fan_in_fan_out: bool | None
+lora_target_modules: str | list[str] | None
+lora_target_parameters: str | list[str] | None
+# If true, will target all linear modules
+lora_target_linear: bool | None
+# If you added new tokens to the tokenizer, you may need to save some LoRA modules
+# because they need to know the new tokens. For LLaMA and Mistral, you need to save
+# `embed_tokens` and `lm_head`. It may vary for other models. `embed_tokens` converts
+# tokens to embeddings, and `lm_head` converts embeddings to token probabilities.
+lora_modules_to_save: list[str] | None
+lora_dropout: float | None = 0.0
+# The layer indices to transform; if unset, applies to all layers
+peft_layers_to_transform: list[int] | None
+peft_layers_pattern: list[str] | None
+
+peft: PeftConfig | None
+ # For PeftConfig:
+ # Configuration options for loftq initialization for LoRA
+loftq_config: LoftQConfig | None
+ # For LoftQConfig:
+ # typically 4 bits
+loftq_bits: int = 4
+
+# Whether to use DoRA.
+peft_use_dora: bool | None
+# Whether to use RSLoRA.
+peft_use_rslora: bool | None
+# List of layer indices to replicate.
+peft_layer_replication: list[tuple[int, int]] | None
+# How to initialize LoRA weights. Defaults to True (the MS original implementation).
+peft_init_lora_weights: bool | str | None
+# A list of token indices to fine-tune on the `embed_tokens` layer. Otherwise, a dict
+# mapping an embedding layer name to its trainable token indices. See
+# https://huggingface.co/docs/peft/v0.17.0/en/developer_guides/lora#efficiently-train-
+# tokens-alongside-lora
+peft_trainable_token_indices: list[int] | dict[str, list[int]] | None
+# Whether to tie adapter weights for tied model weights. See
+# https://github.com/huggingface/peft/issues/2864
+peft_ensure_weight_tying: bool | None
+# Whether to upcast the LoRA adapter to fp32. This is enabled by default in PEFT.
+peft_autocast_adapter_dtype: bool | None
+
+# load qlora model in sharded format for FSDP using answer.ai technique.
+qlora_sharded_model_loading: bool | None = False
+# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it
+# takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge
+lora_on_cpu: bool | None
+# Whether you are training a 4-bit GPTQ quantized model
+gptq: bool | None
+# optional overrides to the bnb 4bit quantization configuration
+bnb_config_kwargs: dict[str, Any] | None
+
+# loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.
+loraplus_lr_ratio: float | None
+# loraplus learning rate for lora embedding layers. Default value is 1e-6.
+loraplus_lr_embedding: float | None = 1e-06
+
+merge_lora: bool | None
+# Method to use for LoRA merging. 'memory_efficient' (default) processes shards
+# individually to reduce memory usage, 'legacy' loads the full model into memory.
+merge_method: Literal['legacy', 'memory_efficient'] | None = memory_efficient
+
+# Whether to use ReLoRA. Use with jagged_restart_*steps options.
+relora: bool | None
+# threshold for optimizer magnitude when pruning
+relora_prune_ratio: float | None
+# True to perform lora weight merges on cpu during restarts, for modest gpu memory
+# savings
+relora_cpu_offload: bool | None
+
+# how often to reset for jagged restarts
+jagged_restart_steps: int | None
+# how many warmup steps to take after reset for jagged restarts
+jagged_restart_warmup_steps: int | None
+# how many anneal steps to take before reset for jagged restarts
+jagged_restart_anneal_steps: int | None
+
+# If greater than 1, the optimizer update will be deferred and the gradients will be
+# accumulated for the given number of steps.
+gradient_accumulation_steps: int | None = 1
+# The number of samples to include in each batch. This is the number of samples sent to
+# each GPU. Batch size per gpu = micro_batch_size * gradient_accumulation_steps
+micro_batch_size: int | None = 1
+# Total batch size; we do not recommend setting this manually
+batch_size: int | None
+# per gpu micro batch size for evals, defaults to value of micro_batch_size
+eval_batch_size: int | None
+
+# whether to find batch size that fits in memory. Passed to underlying transformers
+# Trainer
+auto_find_batch_size: bool | None
+
+# Whether to include the human's prompt in the training labels instead of masking it out
+train_on_inputs: bool | None = False
+# Group similarly sized data to minimize padding. May be slower to start, as it must
+# download and sort the entire dataset. Note that training loss may have an oscillating
+# pattern with this enabled.
+group_by_length: bool | None
+
+learning_rate: str | float (required)
+embedding_lr: float | None
+embedding_lr_scale: float | None
+# Specify weight decay
+weight_decay: float | None = 0.0
+# Specify optimizer
+optimizer: OptimizerNames | CustomSupportedOptimizers | None = OptimizerNames.ADAMW_TORCH_FUSED
+# Dictionary of arguments to pass to the optimizer
+optim_args: str | dict[str, Any] | None
+# The target modules to optimize, i.e. the module names that you would like to train,
+# right now this is used only for GaLore algorithm
+optim_target_modules: list[str] | Literal['all_linear'] | None
+# Path to torch distx for optim 'adamw_anyprecision'
+torchdistx_path: str | None
+lr_scheduler: SchedulerType | Literal['one_cycle'] | Literal['rex'] | None = SchedulerType.COSINE
+# Specify a scheduler and kwargs to use with the optimizer
+lr_scheduler_kwargs: dict[str, Any] | None
+lr_quadratic_warmup: bool | None
+# decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of
+# peak lr
+cosine_min_lr_ratio: float | None
+# hold the lr constant at cosine_min_lr after some fraction of the steps, e.g.
+# cosine_constant_lr_ratio=0.8 starts cosine_min_lr at 80% of the training steps
+cosine_constant_lr_ratio: float | None
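+# Example (illustrative values) decaying to 10% of the peak lr and holding it
+# there for the final 20% of training:
+#   cosine_min_lr_ratio: 0.1
+#   cosine_constant_lr_ratio: 0.8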
+# Learning rate div factor
+lr_div_factor: float | None
+
+lr_groups: list[LrGroup] | None
+ # For LrGroup:
+ name: str (required)
+ modules: list[str] (required)
+ lr: float (required)
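+# Example (illustrative module names) giving MLP down projections their own lr:
+#   lr_groups:
+#     - name: down_proj
+#       modules: ["down_proj"]
+#       lr: 1e-5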
+
+# adamw hyperparams
+adam_epsilon: float | None
+# only used for CAME Optimizer
+adam_epsilon2: float | None
+# adamw hyperparams
+adam_beta1: float | None
+# adamw hyperparams
+adam_beta2: float | None
+# only used for CAME Optimizer
+adam_beta3: float | None
+
+# Dion Optimizer learning rate
+dion_lr: float | None
+# Dion Optimizer momentum
+dion_momentum: float | None
+# Dion Optimizer: r/d fraction for low-rank approximation. Used to compute the low-rank
+# dimension.
+dion_rank_fraction: float | None = 1.0
+# Dion Optimizer: Round up the low-rank dimension to a multiple of this number. This may
+# be useful to ensure even sharding.
+dion_rank_multiple_of: int | None = 1
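+# Worked example (illustrative values): with hidden dimension d=4096,
+# dion_rank_fraction: 0.25 and dion_rank_multiple_of: 128 give a low-rank
+# dimension of 0.25 * 4096 = 1024, already a multiple of 128.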
+
+# Gradient clipping max norm
+max_grad_norm: float | None
+num_epochs: float = 1.0
+
+use_wandb: bool | None
+# Set the name of your wandb run
+wandb_name: str | None
+# Set the ID of your wandb run
+wandb_run_id: str | None
+# "offline" to save run metadata locally and not sync to the server, "disabled" to turn
+# off wandb
+wandb_mode: str | None
+# Your wandb project name
+wandb_project: str | None
+# A wandb Team name if using a Team
+wandb_entity: str | None
+wandb_watch: str | None
+# "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only
+# at the end of training
+wandb_log_model: str | None
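+# Example (illustrative names) logging offline to a named project:
+#   use_wandb: true
+#   wandb_project: my-project
+#   wandb_mode: offline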
+
+use_mlflow: bool | None
+# URI to mlflow
+mlflow_tracking_uri: str | None
+# Your experiment name
+mlflow_experiment_name: str | None
+# Your run name
+mlflow_run_name: str | None
+# set to true to copy each saved checkpoint to the mlflow artifact registry
+hf_mlflow_log_artifacts: bool | None
+
+# Enable or disable Comet integration.
+use_comet: bool | None
+# API key for Comet. Recommended to set via `comet login`.
+comet_api_key: str | None
+# Workspace name in Comet. Defaults to the user's default workspace.
+comet_workspace: str | None
+# Project name in Comet. Defaults to Uncategorized.
+comet_project_name: str | None
+# Identifier for the experiment. Used to append data to an existing experiment or
+# control the key of new experiments. Defaults to a random key.
+comet_experiment_key: str | None
+# Create a new experiment ("create") or log to an existing one ("get"). Default
+# ("get_or_create") auto-selects based on configuration.
+comet_mode: str | None
+# Set to True to log data to Comet server, or False for offline storage. Default is
+# True.
+comet_online: bool | None
+# Dictionary for additional configuration settings, see the doc for more details.
+comet_experiment_config: dict[str, Any] | None
+
+use_trackio: bool | None
+# Your trackio project name
+trackio_project_name: str | None
+# Set the name of your trackio run
+trackio_run_name: str | None
+# Hugging Face Space ID to sync dashboard to (optional, runs locally if not provided)
+trackio_space_id: str | None
+
+# Enable OpenTelemetry metrics collection and Prometheus export
+use_otel_metrics: bool | None = False
+# Host to bind the OpenTelemetry metrics server to
+otel_metrics_host: str | None = localhost
+# Port for the Prometheus metrics HTTP server
+otel_metrics_port: int | None = 8000
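+# Example (illustrative) serving Prometheus metrics on a non-default port:
+#   use_otel_metrics: true
+#   otel_metrics_port: 9090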
+
+# the number of active layers in LISA
+lisa_n_layers: int | None
+# how often to switch layers in LISA
+lisa_step_interval: int | None
+# path under the model to access the layers
+lisa_layers_attribute: str | None = model.layers
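+# Example (illustrative values) activating 4 layers and switching every 5 steps:
+#   lisa_n_layers: 4
+#   lisa_step_interval: 5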
+
+gradio_title: str | None
+gradio_share: bool | None
+gradio_server_name: str | None
+gradio_server_port: int | None
+gradio_max_new_tokens: int | None
+gradio_temperature: float | None
+
+use_ray: bool = False
+ray_run_name: str | None
+ray_num_workers: int = 1
+resources_per_worker: dict
+
+# The size of the image to resize to. It can be an integer (resized into a padded
+# square image) or a tuple (width, height). If not provided, we will attempt to
+# load it from preprocessor.size; otherwise, images won't be resized.
+image_size: int | tuple[int, int] | None
+# The resampling algorithm to use for image resizing. Default is bilinear. Please refer
+# to PIL.Image.Resampling for more details.
+image_resize_algorithm: Literal['bilinear', 'bicubic', 'lanczos'] | Resampling | None
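+# Example (illustrative) resizing to a 512x512 padded square with bicubic:
+#   image_size: 512
+#   image_resize_algorithm: bicubic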
+
+# optional overrides to the base model configuration
+overrides_of_model_config: dict[str, Any] | None
+# optional overrides to the base model's from_pretrained loading kwargs
+overrides_of_model_kwargs: dict[str, Any] | None
+# If you want to specify the type of model to load, AutoModelForCausalLM is a good
+# choice too
+type_of_model: str | None
+# You can choose a specific model revision from the huggingface hub
+revision_of_model: str | None
+
+max_packed_sequence_len: int | None
+rope_scaling: Any | None
+noisy_embedding_alpha: float | None
+dpo_beta: float | None
+evaluation_strategy: str | None
+eval_table_size: int | None
+eval_max_new_tokens: int | None
+dpo_use_logits_to_keep: bool | None
+dpo_generate_during_eval: bool | None
+dpo_norm_loss: bool | None
+rpo_alpha: float | None
diff --git a/docs/custom_integrations.html b/docs/custom_integrations.html
index ca3efc1b9..f24f87f43 100644
--- a/docs/custom_integrations.html
+++ b/docs/custom_integrations.html
@@ -1712,7 +1712,7 @@ The quick brown fox jumps over the loud dog
kd_alpha: 0.9
kd_temperature: 1.0
-torch_compile: True # torch>=2.6.0, recommended to reduce vram
+torch_compile: True # recommended to reduce vram
datasets:
  - path: ...
diff --git a/docs/debugging.html b/docs/debugging.html
index 35530faf5..05bf04b95 100644
--- a/docs/debugging.html
+++ b/docs/debugging.html
@@ -914,8 +914,9 @@ If you prefer to watch a video, rather than read, you can skip to the Setup
Make sure you have an editable install of Axolotl, which ensures that changes you make to the code are reflected at runtime. Run the following commands from the root of this project:
export UV_TORCH_BACKEND=cu128 # or cu130
-uv sync --extra flash-attn --extra deepspeed --group dev --group test
-source .venv/bin/activate
+uv venv --no-project --relocatable
+source .venv/bin/activate
+uv pip install --no-build-isolation -e '.[deepspeed]' --group dev --group test
Remote Hosts
If you are developing on a remote host, you can easily use VSCode to debug remotely. To do so, you will need to follow this Remote - SSH guide. You can also see the video below on Docker and Remote SSH debugging.
@@ -1035,8 +1036,9 @@ If you already have axolotl cloned on your host, make sure you have the latest c
To understand which containers are available, see the Docker section of the README and the DockerHub repo. For details of how the Docker containers are built, see axolotl’s Docker CI builds.
You will now be in the container. Next, install Axolotl with dev dependencies:
-uv sync --extra flash-attn --extra deepspeed --group dev --group test
-source .venv/bin/activate
+uv venv --no-project --relocatable
+source .venv/bin/activate
+uv pip install --no-build-isolation -e '.[deepspeed]' --group dev --group test
For Blackwell GPUs, please use the tags with PyTorch 2.9.1 and CUDA 12.8.
-Tip
-Each image below is available in a uv variant that uses uv with
-a relocatable venv (/workspace/axolotl-venv) instead of Miniconda + pip. Append -uv to the image name
-(e.g. axolotlai/axolotl-base-uv). Tags follow the same format. We recommend the uv images for new deployments.
+Each image below ships a uv variant that uses uv with a relocatable venv
+(/workspace/axolotl-venv) instead of Miniconda + pip. Append -uv to the image name
+(e.g. axolotlai/axolotl-uv, axolotlai/axolotl-base-uv, axolotlai/axolotl-cloud-uv). Tags follow the
+same format as their non-uv counterparts.
+We recommend switching to the -uv images early. In the near future we will publish the uv-based
+build to the non-uv tags as well. The non-uv names will continue to work, but they will start serving
+the uv image.
-A: We currently recommend torch 2.6.0 for use with vllm. Please ensure you use the right version. For Docker, please use the main-py3.11-cu124-2.6.0 tag.
+A: We currently recommend torch 2.10 for use with vllm. Please ensure you use the right version. For Docker, please use the main-py3.12-cu128-2.10.0 tag (note: torch 2.10 images are built with Python 3.12).
Q: FA2 2.8.0 undefined symbol runtime error on CUDA 12.4
# Ensure you have a compatible version of Pytorch installed
-uv pip install --no-build-isolation 'axolotl[flash-attn]>=0.12.0'
+uv pip install --no-build-isolation 'axolotl>=0.16.1'
# Install Cut Cross Entropy
python scripts/cutcrossentropy_install.py | sh
Axolotl is easy to install from pip, or use our pre-built Docker images for a hassle-free dependency experience. See our docs for more information.
-%%capture
-# This step can take ~5-10 minutes to install dependencies
-!pip install --no-build-isolation axolotl[flash-attn]>=0.9.1
-!pip install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@fec1a88"
+%%capture
+# This step can take ~5-10 minutes to install dependencies
+!pip install --no-build-isolation "axolotl>=0.16.1"
+!pip install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@fec1a88"
Demo: Talk Like a Pirate
diff --git a/search.json b/search.json
index 24049d6c3..ebedb8721 100644
--- a/search.json
+++ b/search.json
@@ -56,7 +56,7 @@
"href": "docs/models/seed-oss.html#getting-started",
"title": "Seed-OSS",
"section": "Getting started",
- "text": "Getting started\n\nInstall Axolotl following the installation guide.\nHere is an example of how to install from pip:\n# Ensure you have a compatible version of Pytorch installed\nuv pip install --no-build-isolation 'axolotl[flash-attn]>=0.12.0'\n\n# Install Cut Cross Entropy\npython scripts/cutcrossentropy_install.py | sh\nRun the finetuning example:\n\naxolotl train examples/seed-oss/seed-oss-36b-qlora.yaml\nThis config uses about 27.7 GiB VRAM.\nLet us know how it goes. Happy finetuning! 🚀\n\nTIPS\n\nFor inference, the official Seed Team recommends top_p=0.95 and temperature=1.1.\nYou can run a full finetuning by removing the adapter: qlora and load_in_4bit: true from the config.\nRead more on how to load your own dataset at docs.\nThe dataset format follows the OpenAI Messages format as seen here.",
+ "text": "Getting started\n\nInstall Axolotl following the installation guide.\nHere is an example of how to install from pip:\n# Ensure you have a compatible version of Pytorch installed\nuv pip install --no-build-isolation 'axolotl>=0.16.1'\n\n# Install Cut Cross Entropy\npython scripts/cutcrossentropy_install.py | sh\nRun the finetuning example:\n\naxolotl train examples/seed-oss/seed-oss-36b-qlora.yaml\nThis config uses about 27.7 GiB VRAM.\nLet us know how it goes. Happy finetuning! 🚀\n\nTIPS\n\nFor inference, the official Seed Team recommends top_p=0.95 and temperature=1.1.\nYou can run a full finetuning by removing the adapter: qlora and load_in_4bit: true from the config.\nRead more on how to load your own dataset at docs.\nThe dataset format follows the OpenAI Messages format as seen here.",
"crumbs": [
"Getting Started",
"Model Guides",
@@ -152,7 +152,7 @@
"href": "docs/models/apertus.html#getting-started",
"title": "Apertus",
"section": "Getting started",
- "text": "Getting started\n\nInstall Axolotl following the installation guide. You need to install from main as Apertus is only on nightly or use our latest Docker images.\nHere is an example of how to install from main for pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.6.0 min)\ngit clone https://github.com/axolotl-ai-cloud/axolotl.git\ncd axolotl\n\nuv pip install --no-build-isolation -e '.[flash-attn]'\n\n# Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy\npython scripts/cutcrossentropy_install.py | sh\n\n(Optional, highly recommended) Install XIELU CUDA\n\n## Recommended for reduced VRAM and faster speeds\n\n# Point to CUDA toolkit directory\n# For those using our Docker image, use the below path.\nexport CUDA_HOME=/usr/local/cuda\n\nuv pip install git+https://github.com/nickjbrowning/XIELU@59d6031 --no-build-isolation --no-deps\nFor any installation errors, see XIELU Installation Issues\n\nRun the finetuning example:\n\naxolotl train examples/apertus/apertus-8b-qlora.yaml\nThis config uses about 8.7 GiB VRAM.\nLet us know how it goes. Happy finetuning! 🚀\n\nTips\n\nFor inference, the official Apertus team recommends top_p=0.9 and temperature=0.8.\nYou can instead use full paremter fine-tuning by removing the adapter: qlora and load_in_4bit: true from the config.\nRead more on how to load your own dataset at docs.\nThe dataset format follows the OpenAI Messages format as seen here.\n\n\n\nXIELU Installation Issues\n\nModuleNotFoundError: No module named 'torch'\nPlease check these one by one:\n- Running in correct environment\n- Env has PyTorch installed\n- CUDA toolkit is at CUDA_HOME\nIf those didn’t help, please try the below solutions:\n\nPass env for CMAKE and try install again:\nPython_EXECUTABLE=$(which python) uv pip install git+https://github.com/nickjbrowning/XIELU@59d6031 --no-build-isolation --no-deps\nGit clone the repo and manually hardcode python path:\ngit clone https://github.com/nickjbrowning/XIELU\ncd xielu\ngit checkout 59d6031\n\ncd xielu\nnano CMakeLists.txt # or vi depending on your preference\nexecute_process(\n- COMMAND ${Python_EXECUTABLE} -c \"import torch.utils; print(torch.utils.cmake_prefix_path)\"\n+ COMMAND /root/miniconda3/envs/py3.11/bin/python -c \"import torch.utils; print(torch.utils.cmake_prefix_path)\"\n RESULT_VARIABLE TORCH_CMAKE_PATH_RESULT\n OUTPUT_VARIABLE TORCH_CMAKE_PATH_OUTPUT\n ERROR_VARIABLE TORCH_CMAKE_PATH_ERROR\n)\nuv pip install . --no-build-isolation --no-deps",
+ "text": "Getting started\n\nInstall Axolotl following the installation guide. You need to install from main as Apertus is only on nightly or use our latest Docker images.\nHere is an example of how to install from main for pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.9.1 min)\ngit clone https://github.com/axolotl-ai-cloud/axolotl.git\ncd axolotl\n\nuv pip install --no-build-isolation -e '.'\n\n# Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy\npython scripts/cutcrossentropy_install.py | sh\n\n(Optional, highly recommended) Install XIELU CUDA\n\n## Recommended for reduced VRAM and faster speeds\n\n# Point to CUDA toolkit directory\n# For those using our Docker image, use the below path.\nexport CUDA_HOME=/usr/local/cuda\n\nuv pip install git+https://github.com/nickjbrowning/XIELU@59d6031 --no-build-isolation --no-deps\nFor any installation errors, see XIELU Installation Issues\n\nRun the finetuning example:\n\naxolotl train examples/apertus/apertus-8b-qlora.yaml\nThis config uses about 8.7 GiB VRAM.\nLet us know how it goes. Happy finetuning! 🚀\n\nTips\n\nFor inference, the official Apertus team recommends top_p=0.9 and temperature=0.8.\nYou can instead use full paremter fine-tuning by removing the adapter: qlora and load_in_4bit: true from the config.\nRead more on how to load your own dataset at docs.\nThe dataset format follows the OpenAI Messages format as seen here.\n\n\n\nXIELU Installation Issues\n\nModuleNotFoundError: No module named 'torch'\nPlease check these one by one:\n- Running in correct environment\n- Env has PyTorch installed\n- CUDA toolkit is at CUDA_HOME\nIf those didn’t help, please try the below solutions:\n\nPass env for CMAKE and try install again:\nPython_EXECUTABLE=$(which python) uv pip install git+https://github.com/nickjbrowning/XIELU@59d6031 --no-build-isolation --no-deps\nGit clone the repo and manually hardcode python path:\ngit clone https://github.com/nickjbrowning/XIELU\ncd xielu\ngit checkout 59d6031\n\ncd xielu\nnano CMakeLists.txt # or vi depending on your preference\nexecute_process(\n- COMMAND ${Python_EXECUTABLE} -c \"import torch.utils; print(torch.utils.cmake_prefix_path)\"\n+ COMMAND /root/miniconda3/envs/py3.11/bin/python -c \"import torch.utils; print(torch.utils.cmake_prefix_path)\"\n RESULT_VARIABLE TORCH_CMAKE_PATH_RESULT\n OUTPUT_VARIABLE TORCH_CMAKE_PATH_OUTPUT\n ERROR_VARIABLE TORCH_CMAKE_PATH_ERROR\n)\nuv pip install . --no-build-isolation --no-deps",
"crumbs": [
"Getting Started",
"Model Guides",
@@ -212,7 +212,7 @@
"href": "docs/models/smolvlm2.html#getting-started",
"title": "SmolVLM 2",
"section": "Getting Started",
- "text": "Getting Started\n\nInstall Axolotl following the installation guide.\nHere is an example of how to install from pip:\n# Ensure you have a compatible version of Pytorch installed\nuv pip install --no-build-isolation 'axolotl[flash-attn]>=0.12.0'\nInstall an extra dependency:\nuv pip install num2words==0.5.14\nRun the finetuning example:\n# LoRA SFT (1x48GB @ 6.8GiB)\naxolotl train examples/smolvlm2/smolvlm2-2B-lora.yaml",
+ "text": "Getting Started\n\nInstall Axolotl following the installation guide.\nHere is an example of how to install from pip:\n# Ensure you have a compatible version of Pytorch installed\nuv pip install --no-build-isolation 'axolotl>=0.16.1'\nInstall an extra dependency:\nuv pip install num2words==0.5.14\nRun the finetuning example:\n# LoRA SFT (1x48GB @ 6.8GiB)\naxolotl train examples/smolvlm2/smolvlm2-2B-lora.yaml",
"crumbs": [
"Getting Started",
"Model Guides",
@@ -272,7 +272,7 @@
"href": "docs/models/arcee.html#getting-started",
"title": "Arcee AFM",
"section": "Getting started",
- "text": "Getting started\n\nInstall Axolotl following the installation guide. You need to install from main as AFM is only on nightly or use our latest Docker images.\nHere is an example of how to install from main for pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.6.0 min)\ngit clone https://github.com/axolotl-ai-cloud/axolotl.git\ncd axolotl\n\nuv pip install --no-build-isolation -e '.[flash-attn]'\n\n# Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy\npython scripts/cutcrossentropy_install.py | sh\n\nRun the finetuning example:\n\naxolotl train examples/arcee/afm-4.5b-qlora.yaml\nThis config uses about 7.8GiB VRAM.\nLet us know how it goes. Happy finetuning! 🚀\n\nTIPS\n\nFor inference, the official Arcee.ai team recommends top_p: 0.95, temperature: 0.5, top_k: 50, and repeat_penalty: 1.1.\nYou can run a full finetuning by removing the adapter: qlora and load_in_4bit: true from the config.\nRead more on how to load your own dataset at docs.\nThe dataset format follows the OpenAI Messages format as seen here.",
+ "text": "Getting started\n\nInstall Axolotl following the installation guide. You need to install from main as AFM is only on nightly or use our latest Docker images.\nHere is an example of how to install from main for pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.9.1 min)\ngit clone https://github.com/axolotl-ai-cloud/axolotl.git\ncd axolotl\n\nuv pip install --no-build-isolation -e '.'\n\n# Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy\npython scripts/cutcrossentropy_install.py | sh\n\nRun the finetuning example:\n\naxolotl train examples/arcee/afm-4.5b-qlora.yaml\nThis config uses about 7.8GiB VRAM.\nLet us know how it goes. Happy finetuning! 🚀\n\nTIPS\n\nFor inference, the official Arcee.ai team recommends top_p: 0.95, temperature: 0.5, top_k: 50, and repeat_penalty: 1.1.\nYou can run a full finetuning by removing the adapter: qlora and load_in_4bit: true from the config.\nRead more on how to load your own dataset at docs.\nThe dataset format follows the OpenAI Messages format as seen here.",
"crumbs": [
"Getting Started",
"Model Guides",
@@ -565,7 +565,7 @@
"href": "docs/models/hunyuan.html#getting-started",
"title": "Hunyuan",
"section": "Getting started",
- "text": "Getting started\n\nInstall Axolotl following the installation guide. You need to install from main as HunYuan is only on nightly or use our latest Docker images.\nHere is an example of how to install from main for pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.6.0 min)\ngit clone https://github.com/axolotl-ai-cloud/axolotl.git\ncd axolotl\n\nuv pip install --no-build-isolation -e '.[flash-attn]'\n\n# Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy\npython scripts/cutcrossentropy_install.py | sh\n\nRun the finetuning example:\n\naxolotl train examples/hunyuan/hunyuan-v1-dense-qlora.yaml\nThis config uses about 4.7 GB VRAM.\nLet us know how it goes. Happy finetuning! 🚀\n\nDataset\nHunYuan Instruct models can choose to enter a slow think or fast think pattern. For best performance on fine-tuning their Instruct models, your dataset should be adjusted to match their pattern.\n# fast think pattern\nmessages = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"/no_think What color is the sun?\" },\n {\"role\": \"assistant\", \"content\": \"<think>\\n\\n</think>\\n<answer>\\nThe sun is yellow.\\n</answer>\"}\n]\n\n# slow think pattern\nmessages = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"/no_think What color is the sun?\" },\n {\"role\": \"assistant\", \"content\": \"<think>\\nThe user is asking about the color of the sun. I need to ...\\n</think>\\n<answer>\\nThe sun is yellow.\\n</answer>\"}\n]\n\n\nTIPS\n\nFor inference, the official Tencent team recommends\n\n\n{\n \"do_sample\": true,\n \"top_k\": 20,\n \"top_p\": 0.8,\n \"repetition_penalty\": 1.05,\n \"temperature\": 0.7\n}\n\nYou can run a full finetuning by removing the adapter: qlora and load_in_4bit: true from the config.\nRead more on how to load your own dataset at docs.\nThe dataset format follows the OpenAI Messages format as seen here.",
+ "text": "Getting started\n\nInstall Axolotl following the installation guide. You need to install from main as HunYuan is only on nightly or use our latest Docker images.\nHere is an example of how to install from main for pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.9.1 min)\ngit clone https://github.com/axolotl-ai-cloud/axolotl.git\ncd axolotl\n\nuv pip install --no-build-isolation -e '.'\n\n# Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy\npython scripts/cutcrossentropy_install.py | sh\n\nRun the finetuning example:\n\naxolotl train examples/hunyuan/hunyuan-v1-dense-qlora.yaml\nThis config uses about 4.7 GB VRAM.\nLet us know how it goes. Happy finetuning! 🚀\n\nDataset\nHunYuan Instruct models can choose to enter a slow think or fast think pattern. For best performance on fine-tuning their Instruct models, your dataset should be adjusted to match their pattern.\n# fast think pattern\nmessages = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"/no_think What color is the sun?\" },\n {\"role\": \"assistant\", \"content\": \"<think>\\n\\n</think>\\n<answer>\\nThe sun is yellow.\\n</answer>\"}\n]\n\n# slow think pattern\nmessages = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"/no_think What color is the sun?\" },\n {\"role\": \"assistant\", \"content\": \"<think>\\nThe user is asking about the color of the sun. I need to ...\\n</think>\\n<answer>\\nThe sun is yellow.\\n</answer>\"}\n]\n\n\nTIPS\n\nFor inference, the official Tencent team recommends\n\n\n{\n \"do_sample\": true,\n \"top_k\": 20,\n \"top_p\": 0.8,\n \"repetition_penalty\": 1.05,\n \"temperature\": 0.7\n}\n\nYou can run a full finetuning by removing the adapter: qlora and load_in_4bit: true from the config.\nRead more on how to load your own dataset at docs.\nThe dataset format follows the OpenAI Messages format as seen here.",
"crumbs": [
"Getting Started",
"Model Guides",
@@ -685,7 +685,7 @@
"href": "docs/models/gemma3n.html#getting-started",
"title": "Gemma 3n",
"section": "Getting started",
- "text": "Getting started\n\nInstall Axolotl following the installation guide.\nHere is an example of how to install from pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.6.0 min)\nuv pip install --no-build-isolation 'axolotl[flash-attn]>=0.12.0'\n\nIn addition to Axolotl’s requirements, Gemma-3n requires:\n\nuv pip install timm==1.0.17\n\n# for loading audio data\nuv pip install librosa==0.11.0\n\nDownload sample dataset files\n\n# for text + vision + audio only\nwget https://huggingface.co/datasets/Nanobit/text-vision-audio-2k-test/resolve/main/African_elephant.jpg\nwget https://huggingface.co/datasets/Nanobit/text-vision-audio-2k-test/resolve/main/En-us-African_elephant.oga\n\nRun the finetuning example:\n\n# text only\naxolotl train examples/gemma3n/gemma-3n-e2b-qlora.yml\n\n# text + vision\naxolotl train examples/gemma3n/gemma-3n-e2b-vision-qlora.yml\n\n# text + vision + audio\naxolotl train examples/gemma3n/gemma-3n-e2b-vision-audio-qlora.yml\nLet us know how it goes. Happy finetuning! 🚀\nWARNING: The loss and grad norm will be much higher than normal. We suspect this to be inherent to the model as of the moment. If anyone would like to submit a fix for this, we are happy to take a look.\n\nTIPS\n\nYou can run a full finetuning by removing the adapter: qlora and load_in_4bit: true from the config.\nRead more on how to load your own dataset at docs.\nThe text dataset format follows the OpenAI Messages format as seen here.\nThe multimodal dataset format follows the OpenAI multi-content Messages format as seen here.",
+ "text": "Getting started\n\nInstall Axolotl following the installation guide.\nHere is an example of how to install from pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.9.1 min)\nuv pip install --no-build-isolation 'axolotl>=0.16.1'\n\nIn addition to Axolotl’s requirements, Gemma-3n requires:\n\nuv pip install timm==1.0.17\n\n# for loading audio data\nuv pip install librosa==0.11.0\n\nDownload sample dataset files\n\n# for text + vision + audio only\nwget https://huggingface.co/datasets/Nanobit/text-vision-audio-2k-test/resolve/main/African_elephant.jpg\nwget https://huggingface.co/datasets/Nanobit/text-vision-audio-2k-test/resolve/main/En-us-African_elephant.oga\n\nRun the finetuning example:\n\n# text only\naxolotl train examples/gemma3n/gemma-3n-e2b-qlora.yml\n\n# text + vision\naxolotl train examples/gemma3n/gemma-3n-e2b-vision-qlora.yml\n\n# text + vision + audio\naxolotl train examples/gemma3n/gemma-3n-e2b-vision-audio-qlora.yml\nLet us know how it goes. Happy finetuning! 🚀\nWARNING: The loss and grad norm will be much higher than normal. We suspect this to be inherent to the model as of the moment. If anyone would like to submit a fix for this, we are happy to take a look.\n\nTIPS\n\nYou can run a full finetuning by removing the adapter: qlora and load_in_4bit: true from the config.\nRead more on how to load your own dataset at docs.\nThe text dataset format follows the OpenAI Messages format as seen here.\nThe multimodal dataset format follows the OpenAI multi-content Messages format as seen here.",
"crumbs": [
"Getting Started",
"Model Guides",
@@ -733,7 +733,7 @@
"href": "docs/models/devstral.html#getting-started",
"title": "Devstral",
"section": "Getting started",
- "text": "Getting started\n\nInstall Axolotl following the installation guide.\nHere is an example of how to install from pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.6.0 min)\nuv pip install --no-build-isolation 'axolotl[flash-attn]>=0.12.0'\n\nInstall Cut Cross Entropy to reduce training VRAM usage\n\npython scripts/cutcrossentropy_install.py | sh\n\nRun the finetuning example:\n\naxolotl train examples/devstral/devstral-small-qlora.yml\nThis config uses about 21GB VRAM.\nLet us know how it goes. Happy finetuning! 🚀\n\nTIPS\n\nYou can run a full finetuning by removing the adapter: qlora and load_in_4bit: true from the config.\nRead more on how to load your own dataset at docs.\nThe dataset format follows the OpenAI Messages format as seen here.\nLearn how to use function calling with Axolotl at docs.",
+ "text": "Getting started\n\nInstall Axolotl following the installation guide.\nHere is an example of how to install from pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.9.1 min)\nuv pip install --no-build-isolation 'axolotl>=0.16.1'\n\nInstall Cut Cross Entropy to reduce training VRAM usage\n\npython scripts/cutcrossentropy_install.py | sh\n\nRun the finetuning example:\n\naxolotl train examples/devstral/devstral-small-qlora.yml\nThis config uses about 21GB VRAM.\nLet us know how it goes. Happy finetuning! 🚀\n\nTIPS\n\nYou can run a full finetuning by removing the adapter: qlora and load_in_4bit: true from the config.\nRead more on how to load your own dataset at docs.\nThe dataset format follows the OpenAI Messages format as seen here.\nLearn how to use function calling with Axolotl at docs.",
"crumbs": [
"Getting Started",
"Model Guides",
@@ -2615,7 +2615,7 @@
"href": "docs/custom_integrations.html#knowledge-distillation-kd",
"title": "Custom Integrations",
"section": "Knowledge Distillation (KD)",
- "text": "Knowledge Distillation (KD)\n\nUsage\nplugins:\n - \"axolotl.integrations.kd.KDPlugin\"\n\nkd_trainer: True\nkd_ce_alpha: 0.1\nkd_alpha: 0.9\nkd_temperature: 1.0\n\ntorch_compile: True # torch>=2.6.0, recommended to reduce vram\n\ndatasets:\n - path: ...\n type: \"axolotl.integrations.kd.chat_template\"\n field_messages: \"messages_combined\"\n logprobs_field: \"llm_text_generation_vllm_logprobs\" # for kd only, field of logprobs\nAn example dataset can be found at axolotl-ai-co/evolkit-logprobs-pipeline-75k-v2-sample\nPlease see reference here",
+ "text": "Knowledge Distillation (KD)\n\nUsage\nplugins:\n - \"axolotl.integrations.kd.KDPlugin\"\n\nkd_trainer: True\nkd_ce_alpha: 0.1\nkd_alpha: 0.9\nkd_temperature: 1.0\n\ntorch_compile: True # recommended to reduce vram\n\ndatasets:\n - path: ...\n type: \"axolotl.integrations.kd.chat_template\"\n field_messages: \"messages_combined\"\n logprobs_field: \"llm_text_generation_vllm_logprobs\" # for kd only, field of logprobs\nAn example dataset can be found at axolotl-ai-co/evolkit-logprobs-pipeline-75k-v2-sample\nPlease see reference here",
"crumbs": [
"Advanced Features",
"Custom Integrations"
@@ -3388,7 +3388,7 @@
"href": "docs/installation.html#sec-requirements",
"title": "Installation",
"section": "1 Requirements",
- "text": "1 Requirements\n\nNVIDIA GPU (Ampere architecture or newer for bf16 and Flash Attention) or AMD GPU\nPython ≥3.11\nPyTorch ≥2.9.0",
+ "text": "1 Requirements\n\nNVIDIA GPU (Ampere architecture or newer for bf16 and Flash Attention) or AMD GPU\nPython ≥3.11\nPyTorch ≥2.9.1",
"crumbs": [
"Getting Started",
"Installation"
@@ -3399,7 +3399,7 @@
"href": "docs/installation.html#sec-installation",
"title": "Installation",
"section": "2 Installation",
- "text": "2 Installation\n\n\n\n\n\n\nImportant\n\n\n\nFor Blackwell GPUs, please use Pytorch 2.9.1 and CUDA 12.8.\n\n\n\n2.1 Quick Install\nAxolotl uses uv as its package manager. uv is a fast, reliable Python package installer and resolver built in Rust.\nInstall uv if not already installed:\ncurl -LsSf https://astral.sh/uv/install.sh | sh\nsource $HOME/.local/bin/env\nChoose your CUDA version (e.g. cu128, cu130), create a venv, and install:\nexport UV_TORCH_BACKEND=cu128 # or cu130\nuv venv --no-project --relocatable\nsource .venv/bin/activate\nuv pip install --no-build-isolation axolotl[flash-attn,deepspeed]\n\n\n2.2 Edge/Development Build\nFor the latest features between releases:\ngit clone https://github.com/axolotl-ai-cloud/axolotl.git\ncd axolotl\nexport UV_TORCH_BACKEND=cu128 # or cu130\nuv sync --extra flash-attn --extra deepspeed\nsource .venv/bin/activate\nuv sync creates a .venv, installs exact pinned versions from uv.lock, and sets up an editable install automatically.\n\n\n2.3 Docker\ndocker run --gpus '\"all\"' --rm -it --ipc=host axolotlai/axolotl-uv:main-latest\nFor development with Docker:\ndocker compose up -d\n\n\n\n\n\n\nTipAdvanced Docker Configuration\n\n\n\ndocker run --privileged --gpus '\"all\"' --shm-size 10g --rm -it \\\n --name axolotl --ipc=host \\\n --ulimit memlock=-1 --ulimit stack=67108864 \\\n --mount type=bind,src=\"${PWD}\",target=/workspace/axolotl \\\n -v ${HOME}/.cache/huggingface:/root/.cache/huggingface \\\n axolotlai/axolotl-uv:main-latest\n\n\n\n\n\n\n\n\nImportant\n\n\n\nFor Blackwell GPUs, please use axolotlai/axolotl-uv:main-py3.11-cu128-2.9.1 or the cloud variant axolotlai/axolotl-cloud-uv:main-py3.11-cu128-2.9.1.\n\n\nPlease refer to the Docker documentation for more information on the different Docker images that are available.",
+ "text": "2 Installation\n\n\n\n\n\n\nImportant\n\n\n\nFor Blackwell GPUs, please use Pytorch 2.9.1 and CUDA 12.8.\n\n\n\n2.1 Quick Install\nAxolotl uses uv as its package manager. uv is a fast, reliable Python package installer and resolver built in Rust.\nInstall uv if not already installed:\ncurl -LsSf https://astral.sh/uv/install.sh | sh\nsource $HOME/.local/bin/env\nChoose your CUDA version (e.g. cu128, cu130), create a venv, and install:\nexport UV_TORCH_BACKEND=cu128 # or cu130\nuv venv\nsource .venv/bin/activate\nuv pip install --no-build-isolation axolotl[deepspeed]\n\n\n2.2 Edge/Development Build\nFor the latest features between releases:\ngit clone https://github.com/axolotl-ai-cloud/axolotl.git\ncd axolotl\nexport UV_TORCH_BACKEND=cu128 # or cu130\nuv venv\nsource .venv/bin/activate\nuv pip install --no-build-isolation -e '.[deepspeed]'\n\n\n2.3 Docker\ndocker run --gpus '\"all\"' --rm -it --ipc=host axolotlai/axolotl-uv:main-latest\nFor development with Docker:\ndocker compose up -d\n\n\n\n\n\n\nTipAdvanced Docker Configuration\n\n\n\ndocker run --privileged --gpus '\"all\"' --shm-size 10g --rm -it \\\n --name axolotl --ipc=host \\\n --ulimit memlock=-1 --ulimit stack=67108864 \\\n --mount type=bind,src=\"${PWD}\",target=/workspace/axolotl \\\n -v ${HOME}/.cache/huggingface:/root/.cache/huggingface \\\n axolotlai/axolotl-uv:main-latest\n\n\n\n\n\n\n\n\nImportant\n\n\n\nFor Blackwell GPUs, please use axolotlai/axolotl-uv:main-py3.11-cu128-2.9.1 or the cloud variant axolotlai/axolotl-cloud-uv:main-py3.11-cu128-2.9.1.\n\n\nPlease refer to the Docker documentation for more information on the different Docker images that are available.",
"crumbs": [
"Getting Started",
"Installation"
@@ -3432,7 +3432,7 @@
"href": "docs/installation.html#sec-migrating",
"title": "Installation",
"section": "5 Migrating from pip to uv",
- "text": "5 Migrating from pip to uv\nIf you have an existing pip-based Axolotl installation, you can migrate to uv:\n# Install uv\ncurl -LsSf https://astral.sh/uv/install.sh | sh\nsource $HOME/.local/bin/env\n\n# Create a fresh venv (recommended for a clean start)\nexport UV_TORCH_BACKEND=cu128 # or cu130\nuv venv --no-project --relocatable\nsource .venv/bin/activate\n\n# Reinstall axolotl\nuv pip install --no-build-isolation axolotl[flash-attn,deepspeed]",
+ "text": "5 Migrating from pip to uv\nIf you have an existing pip-based Axolotl installation, you can migrate to uv:\n# Install uv\ncurl -LsSf https://astral.sh/uv/install.sh | sh\nsource $HOME/.local/bin/env\n\n# Create a fresh venv (recommended for a clean start)\nexport UV_TORCH_BACKEND=cu128 # or cu130\nuv venv\nsource .venv/bin/activate\n\n# Reinstall axolotl\nuv pip install --no-build-isolation axolotl[deepspeed]",
"crumbs": [
"Getting Started",
"Installation"
@@ -3443,7 +3443,7 @@
"href": "docs/installation.html#sec-pip",
"title": "Installation",
"section": "6 Using pip (Alternative)",
- "text": "6 Using pip (Alternative)\nIf you are unable to install uv, you can still use pip directly.\n\n\n\n\n\n\nImportant\n\n\n\nPlease make sure to have PyTorch installed before installing Axolotl with pip.\nFollow the instructions at: https://pytorch.org/get-started/locally/\n\n\npip3 install -U packaging setuptools wheel ninja\npip3 install --no-build-isolation axolotl[flash-attn,deepspeed]\nFor editable/development installs:\npip3 install -U packaging setuptools wheel ninja\npip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'",
+ "text": "6 Using pip (Alternative)\nIf you are unable to install uv, you can still use pip directly.\n\n\n\n\n\n\nImportant\n\n\n\nPlease make sure to have PyTorch installed before installing Axolotl with pip.\nFollow the instructions at: https://pytorch.org/get-started/locally/\n\n\npip3 install -U packaging setuptools wheel ninja\npip3 install --no-build-isolation axolotl[deepspeed]\nFor editable/development installs:\npip3 install -U packaging setuptools wheel ninja\npip3 install --no-build-isolation -e '.[deepspeed]'",
"crumbs": [
"Getting Started",
"Installation"
@@ -3799,7 +3799,7 @@
"href": "docs/docker.html#main",
"title": "Docker",
"section": "Main",
- "text": "Main\nThe main image is the image that is used to run Axolotl. It is based on the axolotlai/axolotl-base image and includes the Axolotl codebase, dependencies, and more.\n\nImage\n\n\n\nVariant\nImage\nDocker Hub\n\n\n\n\npip\naxolotlai/axolotl\nLink\n\n\nuv\naxolotlai/axolotl-uv\nLink\n\n\n\n\n\nTags format\n# on push to main\nmain-py{python_version}-cu{cuda_version}-{pytorch_version}\n\n# latest main (currently torch 2.9.1, python 3.11, cuda 12.8)\nmain-latest\n\n# nightly build\n{branch}-{date_in_YYYYMMDD}-py{python_version}-cu{cuda_version}-{pytorch_version}\n\n# tagged release\n{version}\n\n\n\n\n\n\nTip\n\n\n\nThere may be some extra tags appended to the image, like -vllm which installs those packages.\n\n\nTags examples:\n\nmain-py3.11-cu128-2.9.1\nmain-py3.12-cu128-2.10.0\nmain-py3.12-cu130-2.9.1\nmain-py3.12-cu130-2.10.0\nmain-latest\nmain-20260315-py3.11-cu128-2.9.1\n0.12.0",
+ "text": "Main\nThe main image is the image that is used to run Axolotl. It is based on the axolotlai/axolotl-base image and includes the Axolotl codebase, dependencies, and more.\n\nImage\n\n\n\nVariant\nImage\nDocker Hub\n\n\n\n\npip\naxolotlai/axolotl\nLink\n\n\nuv\naxolotlai/axolotl-uv\nLink\n\n\n\n\n\nTags format\n# on push to main\nmain-py{python_version}-cu{cuda_version}-{pytorch_version}\n\n# latest main (currently torch 2.9.1, python 3.11, cuda 12.8)\nmain-latest\n\n# nightly build\n{branch}-{date_in_YYYYMMDD}-py{python_version}-cu{cuda_version}-{pytorch_version}\n\n# tagged release\n{version}\n\n\n\n\n\n\nTip\n\n\n\nThere may be some extra tags appended to the image, like -vllm which installs those packages.\n\n\nTags examples:\n\nmain-py3.11-cu128-2.9.1\nmain-py3.12-cu128-2.10.0\nmain-py3.12-cu130-2.9.1\nmain-py3.12-cu130-2.10.0\nmain-latest\nmain-20260315-py3.11-cu128-2.9.1\n0.16.1",
"crumbs": [
"Deployments",
"Docker"
@@ -4934,14 +4934,14 @@
"href": "docs/api/core.builders.rl.html",
"title": "core.builders.rl",
"section": "",
- "text": "core.builders.rl\nBuilder for RLHF trainers\n\n\n\n\n\nName\nDescription\n\n\n\n\nHFRLTrainerBuilder\nTrainer factory class for TRL-based RLHF trainers (e.g. DPO)\n\n\n\n\n\ncore.builders.rl.HFRLTrainerBuilder(cfg, model, tokenizer, processor=None)\nTrainer factory class for TRL-based RLHF trainers (e.g. DPO)"
+ "text": "core.builders.rl\nBuilder for RLHF trainers\n\n\n\n\n\nName\nDescription\n\n\n\n\nHFRLTrainerBuilder\nTrainer factory class for TRL-based RLHF trainers (e.g. DPO)\n\n\n\n\n\ncore.builders.rl.HFRLTrainerBuilder(cfg, model, tokenizer, processor=None)\nTrainer factory class for TRL-based RLHF trainers (e.g. DPO)\n\n\n\n\n\nName\nDescription\n\n\n\n\nbuild_collator\nBuild a data collator for preference-tuning trainers.\n\n\n\n\n\ncore.builders.rl.HFRLTrainerBuilder.build_collator(**kwargs)\nBuild a data collator for preference-tuning trainers.\nReturns None for RL types that provide their own collator (e.g. GRPO,\nKTO), letting the trainer construct its default. For DPO/IPO/ORPO/SIMPO\nreturns an AxolotlDPODataCollatorWithPadding when\npad_to_multiple_of is set, otherwise None (so the trainer\nfalls back to the TRL default)."
},
{
"objectID": "docs/api/core.builders.rl.html#classes",
"href": "docs/api/core.builders.rl.html#classes",
"title": "core.builders.rl",
"section": "",
- "text": "Name\nDescription\n\n\n\n\nHFRLTrainerBuilder\nTrainer factory class for TRL-based RLHF trainers (e.g. DPO)\n\n\n\n\n\ncore.builders.rl.HFRLTrainerBuilder(cfg, model, tokenizer, processor=None)\nTrainer factory class for TRL-based RLHF trainers (e.g. DPO)"
+ "text": "Name\nDescription\n\n\n\n\nHFRLTrainerBuilder\nTrainer factory class for TRL-based RLHF trainers (e.g. DPO)\n\n\n\n\n\ncore.builders.rl.HFRLTrainerBuilder(cfg, model, tokenizer, processor=None)\nTrainer factory class for TRL-based RLHF trainers (e.g. DPO)\n\n\n\n\n\nName\nDescription\n\n\n\n\nbuild_collator\nBuild a data collator for preference-tuning trainers.\n\n\n\n\n\ncore.builders.rl.HFRLTrainerBuilder.build_collator(**kwargs)\nBuild a data collator for preference-tuning trainers.\nReturns None for RL types that provide their own collator (e.g. GRPO,\nKTO), letting the trainer construct its default. For DPO/IPO/ORPO/SIMPO\nreturns an AxolotlDPODataCollatorWithPadding when\npad_to_multiple_of is set, otherwise None (so the trainer\nfalls back to the TRL default)."
},
{
"objectID": "docs/api/prompt_strategies.alpaca_chat.html",
@@ -5620,7 +5620,7 @@
"href": "docs/faq.html",
"title": "FAQ",
"section": "",
- "text": "General\nQ: The trainer stopped and hasn’t progressed in several minutes.\n\nA: Usually an issue with the GPUs communicating with each other. See the NCCL doc\n\nQ: exitcode: -9\n\nA: This usually happens when you run out of system RAM.\n\nQ: exitcode: -7 while using deepspeed\n\nA: Try upgrading deepspeed w: pip install -U deepspeed\n\nQ: AttributeError: ‘DummyOptim’ object has no attribute ‘step’\nQ: ModuleNotFoundError: No module named ‘mpi4py’ using single GPU with deepspeed\n\nA: You may be using deepspeed with single gpu. Please remove the deepspeed: section in the yaml file or --deepspeed CLI flag.\n\nQ: The codes is stuck on saving preprocessed datasets.\n\nA: This is usually an issue with the GPU. This can be resolved through setting the os environment variable CUDA_VISIBLE_DEVICES=0. If you are on runpod, this is usually a pod issue. Starting a new pod should take care of it.\n\nQ: Received mismatch error on merge adapters / loading adapters between torch.Size of checkpoint and model.\n\nA: This is likely due to vocab size mismatch. By default, Axolotl expands the model’s embeddings if the tokenizer has more tokens than the model. Please use the axolotl merge-lora command to merge the adapters instead of using your own scripts.\n\n\nOn the other hand, if the model has more tokens than the tokenizer, Axolotl does not shrink the model’s embeddings unless shrink_embeddings: true is set in the config.\n\nQ: How to call Axolotl via custom python scripts?\n\nA: Since Axolotl is just Python, please see src/axolotl/cli/main.py on how each command is called.\n\nQ: How to know the value to use for fsdp_transformer_layer_cls_to_wrap?\n\nA: This is the class name of the transformer layer to wrap with FSDP. For example, for LlamaForCausalLM, the value is LlamaDecoderLayer. To find this for a specific model, check the model’s PreTrainedModel definition and look for _no_split_modules variable in the modeling_<model_name>.py file within transformers library.\n\nQ: ValueError: Asking to pad but the tokenizer does not have a padding token. Please select a token to use as pad_token\n\nA: This is because the tokenizer does not have a padding token. Please add a padding token to the tokenizer via:\n\n\nspecial_tokens:\n # str. If you're not sure, set to same as `eos_token`.\n pad_token: \"...\"\n\nQ: IterableDataset error or KeyError: 'input_ids' when using preprocess CLI\n\nA: This is because you may be using preprocess CLI with pretraining_dataset: or skip_prepare_dataset: true respectively. Please use axolotl train CLI directly instead as these datasets are prepared on demand.\n\nQ: vLLM is not working with Axolotl\n\nA: We currently recommend torch 2.6.0 for use with vllm. Please ensure you use the right version. For Docker, please use the main-py3.11-cu124-2.6.0 tag.\n\nQ: FA2 2.8.0 undefined symbol runtime error on CUDA 12.4\n\nA: There seems to be a wheel issue with FA2 2.8.0 on CUDA 12.4. Try CUDA 12.6 instead or downgrade to FA2 2.7.4. Please refer to the upstream issue: https://github.com/Dao-AILab/flash-attention/issues/1717.\n\nQ: Can we mix text and text+image datasets for VLM training?\n\nA: Yes, you can for newer VLM arch. The ones that would not work are LLaVA / Pixtral arch. If you notice one not working, please let us know!\n\nQ: Why is memory/max_* different from nvidia-smi?\n\nA: We use torch APIs to retrieve this information. 
You can see https://docs.pytorch.org/docs/stable/notes/cuda.html#cuda-memory-management for more information.\n\n\n\nChat templates\nQ: jinja2.exceptions.UndefinedError: 'dict object' has no attribute 'content' / 'role' / ____\n\nA: This means that the property mapping for the stated attribute does not exist when building chat_template prompt. For example, if no attribute 'content', please check you have added the correct mapping for content under message_property_mappings.\n\nQ: Empty template generated for turn ___\n\nA: The content is empty for that turn.\n\nQ: Could not find content start/end boundary for turn __\n\nA: The specific turn’s start/end could not be detected. Please ensure you have set the eos_token following your chat_template. Otherwise, this could be a chat_template which doesn’t use proper boundaries for each turn (like system). On the rare occurrence, make sure your content is not [[dummy_message]]. Please let us know about this.\n\nQ: Content end boundary is before start boundary for turn ___\n\nA: This is an edge case which should not occur. Please create an Issue if this happens.\n\nQ: Content end boundary is the same as start boundary for turn ___. This is likely an empty turn.\n\nA: This is likely an empty turn.\n\nQ: The EOS token is incorrectly being masked or not being masked / EOS token __ not found in chat template.\n\nA: There can be two reasons:\n\n\n\nThis is because of the mismatch between tokenizer.eos_token and EOS token in template. Please make sure to set eos_token: under special_tokens: to the same EOS token as in template.\n\n\n\n\nThe EOS token is not in the template. Please check if your template is correct. As an example, phi_35 template does not use its dedicated EOS token <|endoftext|> at the end.\n\n\nQ: “chat_template choice is tokenizer_default but tokenizer’s chat_template is null. Please add a chat_template in tokenizer config”\n\nA: This is because the tokenizer does not have a chat template. Please add a chat template in the tokenizer config. See chat_template for more details.\n\nQ: The EOT token(s) are incorrectly being masked or not being masked / EOT token __ not found in chat template.\n\nA: There can be two reasons:\n\n\n\nThe EOT token is different from the EOS token and was not specified under eot_tokens:. Please set eot_tokens: to the same EOT token(s) as in template.\n\n\n\n\nThere is more than one EOT token per turn in the template. Please raise an issue with examples as we recognize this as an edge case.\n\n\nQ: EOT token encoding failed. Please check if the token is valid and can be encoded.\n\nA: There could be some issue with the tokenizer or unicode encoding. Please raise an issue with examples with the EOT token & tokenizer causing the issue.\n\nQ: EOT token __ is encoded as multiple tokens.\n\nA: This is because the EOT token is encoded as multiple tokens which can cause unexpected behavior. Please add it under tokens: or (recommended) override unused added_tokens via added_tokens_overrides:.\n\nQ: Conflict between train_on_eos and train_on_eot. eos_token is in eot_tokens and train_on_eos != train_on_eot\n\nA: This is because the EOS token is in the eot_tokens: while mismatch between train_on_eos: and train_on_eot:. This will cause one to override the other. Please ensure that train_on_eos: and train_on_eot: are the same or remove the EOS token from eot_tokens:.\n\nQ: If eot_tokens: is not provided, what happens?\n\nA: If eot_tokens: is not provided, the default behavior is the same as before. 
EOS tokens used to delimit turns are masked/unmasked depending on whether the turn is trainable.\n\n\nInternally, eot_tokens: tokenizer.eos_token and train_on_eot: train_on_eos (which defaults to turn). This transition helps clarify the naming and behavior of EOT/EOS tokens.\n\nQ: Data processing error: CAS service error\n\nA: Try disabling XET with export HF_HUB_DISABLE_XET=1\n\nQ: torch._inductor.exc.LoweringException: NoValidChoicesError: No choices to select, please consider adding ATEN into max_autotune_gemm_backends config (defined in torch/_inductor/config.py) to allow at least one choice.\n\nA: Depending on the version of torch, you may need to include this in your YAML:\n\n\nflex_attn_compile_kwargs:\n dynamic: false\n mode: max-autotune-no-cudagraphs\n\n**Q: ValueError(\"Backward pass should have cleared tracker of all tensors\")\n\nA: This may happen due to edge cases in using the modern OffloadActivations context manager for CUDA streams. If you encounter this error, you may have success using the naive implementation with offload_activations: legacy in your YAML.\n\n**Q: Error parsing tool_calls arguments as JSON.\n\nA: There is an error parsing string arguments to a dict. Please check your dataset and the error message for more details.",
+ "text": "General\nQ: The trainer stopped and hasn’t progressed in several minutes.\n\nA: Usually an issue with the GPUs communicating with each other. See the NCCL doc\n\nQ: exitcode: -9\n\nA: This usually happens when you run out of system RAM.\n\nQ: exitcode: -7 while using deepspeed\n\nA: Try upgrading deepspeed w: pip install -U deepspeed\n\nQ: AttributeError: ‘DummyOptim’ object has no attribute ‘step’\nQ: ModuleNotFoundError: No module named ‘mpi4py’ using single GPU with deepspeed\n\nA: You may be using deepspeed with single gpu. Please remove the deepspeed: section in the yaml file or --deepspeed CLI flag.\n\nQ: The codes is stuck on saving preprocessed datasets.\n\nA: This is usually an issue with the GPU. This can be resolved through setting the os environment variable CUDA_VISIBLE_DEVICES=0. If you are on runpod, this is usually a pod issue. Starting a new pod should take care of it.\n\nQ: Received mismatch error on merge adapters / loading adapters between torch.Size of checkpoint and model.\n\nA: This is likely due to vocab size mismatch. By default, Axolotl expands the model’s embeddings if the tokenizer has more tokens than the model. Please use the axolotl merge-lora command to merge the adapters instead of using your own scripts.\n\n\nOn the other hand, if the model has more tokens than the tokenizer, Axolotl does not shrink the model’s embeddings unless shrink_embeddings: true is set in the config.\n\nQ: How to call Axolotl via custom python scripts?\n\nA: Since Axolotl is just Python, please see src/axolotl/cli/main.py on how each command is called.\n\nQ: How to know the value to use for fsdp_transformer_layer_cls_to_wrap?\n\nA: This is the class name of the transformer layer to wrap with FSDP. For example, for LlamaForCausalLM, the value is LlamaDecoderLayer. To find this for a specific model, check the model’s PreTrainedModel definition and look for _no_split_modules variable in the modeling_<model_name>.py file within transformers library.\n\nQ: ValueError: Asking to pad but the tokenizer does not have a padding token. Please select a token to use as pad_token\n\nA: This is because the tokenizer does not have a padding token. Please add a padding token to the tokenizer via:\n\n\nspecial_tokens:\n # str. If you're not sure, set to same as `eos_token`.\n pad_token: \"...\"\n\nQ: IterableDataset error or KeyError: 'input_ids' when using preprocess CLI\n\nA: This is because you may be using preprocess CLI with pretraining_dataset: or skip_prepare_dataset: true respectively. Please use axolotl train CLI directly instead as these datasets are prepared on demand.\n\nQ: vLLM is not working with Axolotl\n\nA: We currently recommend torch 2.10 for use with vllm. Please ensure you use the right version. For Docker, please use the main-py3.12-cu128-2.10.0 tag (note: torch 2.10 images are built with Python 3.12).\n\nQ: FA2 2.8.0 undefined symbol runtime error on CUDA 12.4\n\nA: There seems to be a wheel issue with FA2 2.8.0 on CUDA 12.4. Try CUDA 12.6 instead or downgrade to FA2 2.7.4. Please refer to the upstream issue: https://github.com/Dao-AILab/flash-attention/issues/1717.\n\nQ: Can we mix text and text+image datasets for VLM training?\n\nA: Yes, you can for newer VLM arch. The ones that would not work are LLaVA / Pixtral arch. If you notice one not working, please let us know!\n\nQ: Why is memory/max_* different from nvidia-smi?\n\nA: We use torch APIs to retrieve this information. 
You can see https://docs.pytorch.org/docs/stable/notes/cuda.html#cuda-memory-management for more information.\n\n\n\nChat templates\nQ: jinja2.exceptions.UndefinedError: 'dict object' has no attribute 'content' / 'role' / ____\n\nA: This means that the property mapping for the stated attribute does not exist when building chat_template prompt. For example, if no attribute 'content', please check you have added the correct mapping for content under message_property_mappings.\n\nQ: Empty template generated for turn ___\n\nA: The content is empty for that turn.\n\nQ: Could not find content start/end boundary for turn __\n\nA: The specific turn’s start/end could not be detected. Please ensure you have set the eos_token following your chat_template. Otherwise, this could be a chat_template which doesn’t use proper boundaries for each turn (like system). On the rare occurrence, make sure your content is not [[dummy_message]]. Please let us know about this.\n\nQ: Content end boundary is before start boundary for turn ___\n\nA: This is an edge case which should not occur. Please create an Issue if this happens.\n\nQ: Content end boundary is the same as start boundary for turn ___. This is likely an empty turn.\n\nA: This is likely an empty turn.\n\nQ: The EOS token is incorrectly being masked or not being masked / EOS token __ not found in chat template.\n\nA: There can be two reasons:\n\n\n\nThis is because of the mismatch between tokenizer.eos_token and EOS token in template. Please make sure to set eos_token: under special_tokens: to the same EOS token as in template.\n\n\n\n\nThe EOS token is not in the template. Please check if your template is correct. As an example, phi_35 template does not use its dedicated EOS token <|endoftext|> at the end.\n\n\nQ: “chat_template choice is tokenizer_default but tokenizer’s chat_template is null. Please add a chat_template in tokenizer config”\n\nA: This is because the tokenizer does not have a chat template. Please add a chat template in the tokenizer config. See chat_template for more details.\n\nQ: The EOT token(s) are incorrectly being masked or not being masked / EOT token __ not found in chat template.\n\nA: There can be two reasons:\n\n\n\nThe EOT token is different from the EOS token and was not specified under eot_tokens:. Please set eot_tokens: to the same EOT token(s) as in template.\n\n\n\n\nThere is more than one EOT token per turn in the template. Please raise an issue with examples as we recognize this as an edge case.\n\n\nQ: EOT token encoding failed. Please check if the token is valid and can be encoded.\n\nA: There could be some issue with the tokenizer or unicode encoding. Please raise an issue with examples with the EOT token & tokenizer causing the issue.\n\nQ: EOT token __ is encoded as multiple tokens.\n\nA: This is because the EOT token is encoded as multiple tokens which can cause unexpected behavior. Please add it under tokens: or (recommended) override unused added_tokens via added_tokens_overrides:.\n\nQ: Conflict between train_on_eos and train_on_eot. eos_token is in eot_tokens and train_on_eos != train_on_eot\n\nA: This is because the EOS token is in the eot_tokens: while mismatch between train_on_eos: and train_on_eot:. This will cause one to override the other. Please ensure that train_on_eos: and train_on_eot: are the same or remove the EOS token from eot_tokens:.\n\nQ: If eot_tokens: is not provided, what happens?\n\nA: If eot_tokens: is not provided, the default behavior is the same as before. 
EOS tokens used to delimit turns are masked/unmasked depending on whether the turn is trainable.\n\n\nInternally, eot_tokens: defaults to tokenizer.eos_token and train_on_eot: defaults to train_on_eos (which defaults to turn). This transition helps clarify the naming and behavior of EOT/EOS tokens.\n\nQ: Data processing error: CAS service error\n\nA: Try disabling XET with export HF_HUB_DISABLE_XET=1\n\nQ: torch._inductor.exc.LoweringException: NoValidChoicesError: No choices to select, please consider adding ATEN into max_autotune_gemm_backends config (defined in torch/_inductor/config.py) to allow at least one choice.\n\nA: Depending on the version of torch, you may need to include this in your YAML:\n\n\nflex_attn_compile_kwargs:\n dynamic: false\n mode: max-autotune-no-cudagraphs\n\nQ: ValueError(\"Backward pass should have cleared tracker of all tensors\")\n\nA: This may happen due to edge cases in using the modern OffloadActivations context manager for CUDA streams. If you encounter this error, you may have success using the naive implementation with offload_activations: legacy in your YAML.\n\nQ: Error parsing tool_calls arguments as JSON.\n\nA: There is an error parsing string arguments to a dict. Please check your dataset and the error message for more details.",
"crumbs": [
"Troubleshooting",
"FAQ"
@@ -6023,7 +6023,7 @@
"href": "docs/config-reference.html",
"title": "Config Reference",
"section": "",
- "text": "# Allow overwrite yml config using from cli\nstrict: bool | None = False\n# Resume from a specific checkpoint dir\nresume_from_checkpoint: str | None\n# If resume_from_checkpoint isn't set and you simply want it to start where it left off.\n# Be careful with this being turned on between different models.\nauto_resume_from_checkpoints: bool | None\n# Resize the model embeddings when new tokens are added to multiples of 32. This is\n# reported to improve training speed on some models\nresize_token_embeddings_to_32x: bool | None\nmean_resizing_embeddings: bool | None = False\n\n# Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.\nshrink_embeddings: bool | None\n# Don't upcast the embeddings to float32 when using PEFT. Useful for low-VRAM GPUs\nembeddings_skip_upcast: bool | None\n# Reinitialize model weights randomly instead of loading pretrained weights\nreinit_weights: bool | None\n\n# module to custom trainer class to use for training\ntrainer_cls: str | None\n\n# Use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo', 'ebft'\nrl: RLType | None\n\ntrl: TRLConfig | None\n # For TRLConfig:\n # Beta parameter for the RL training. Same as `rl_beta`. Use\n beta: float | None\n # Maximum length of the completion for RL training.\n max_completion_length: int | None\n\n # Whether to use VLLM for RL training.\n use_vllm: bool = False\n # VLLM mode to use, one of 'server' or 'colocate'\n vllm_mode: Literal['server', 'colocate'] | None\n # Host of the vLLM server to connect to.\n vllm_server_host: str | None = 0.0.0.0\n # Port of the vLLM server to connect to.\n vllm_server_port: int | None = 8000\n # Total timeout (in seconds) to wait for the vLLM server to respond.\n vllm_server_timeout: int | None\n # Regex for vLLM guided decoding.\n vllm_guided_decoding_regex: str | None\n\n # List of reward functions to load. Paths must be importable from current dir.\n reward_funcs: list[str] | None\n # List of reward weights for the reward functions.\n reward_weights: list[float] | None\n # Batch size for generation. Controls how many unique prompts are generated per step.\n # Should be num_generations * data_parallel_size for full DP utilization.\n generation_batch_size: int | None\n # Number of generations to sample.\n num_generations: int | None\n # Whether to log completions.\n log_completions: bool | None = False\n # Number of completions to print when log_completions is True.\n num_completions_to_print: int | None\n # Controls whether importance sampling ratios are computed at the `'token'` or\n # `'sequence'` level. For GSPO, use `sequence`, default is None which corresponds to\n # the original GRPO paper.\n importance_sampling_level: Literal['sequence', 'token'] | None\n\n # Whether to sync the reference model.\n sync_ref_model: bool | None = False\n # Mixup alpha for the reference model.\n ref_model_mixup_alpha: float | None = 0.9\n # Sync steps for the reference model.\n ref_model_sync_steps: int | None = 64\n # Whether to scale rewards by their standard deviation.\n scale_rewards: bool = True\n\n # Sampling temperature for the GRPO policy.\n temperature: float | None\n # Top-p sampling probability for the generation policy.\n top_p: float | None\n # Top-k sampling for the generation policy.\n top_k: int | None\n # Minimum probability for the generation policy.\n min_p: float | None\n # Penalty for tokens that appear in prompt and generated text.\n repetition_penalty: float | None\n # Additional generation parameters passed to vLLM SamplingParams. 
Useful for\n # stop_token_ids, seed, frequency_penalty, etc.\n generation_kwargs: dict[str, Any] | None\n # Additional kwargs for the chat template. E.g., {enable_thinking: false} for Qwen3.5\n # models.\n chat_template_kwargs: dict[str, Any] | None\n # Number of iterations per batch (μ) for GRPO.\n num_iterations: int | None\n # Epsilon value for clipping in the GRPO algorithm.\n epsilon: float | None\n # Upper-bound epsilon value for clipping in the GRPO algorithm.\n epsilon_high: float | None\n # Whether to use Liger loss for GRPO.\n use_liger_loss: bool | None\n # Loss formulation to use. Supported values: grpo, bnpo, dr_grpo.\n loss_type: str | None\n # Whether to exclude truncated completions from loss calculation.\n mask_truncated_completions: bool = False\n # Enable sleep mode for vLLM to offload VRAM when idle\n vllm_enable_sleep_mode: bool | None\n # Path to custom rollout function. Must be importable from current dir.\n rollout_func: str | None\n # Multi-objective reward aggregation strategy. 'sum_then_normalize' (GRPO default):\n # weights and sums rewards first, then normalizes. 'normalize_then_sum' (GDPO):\n # normalizes each reward independently, then sums.\n multi_objective_aggregation: Literal['sum_then_normalize', 'normalize_then_sum'] | None\n\n # Use the GRPODataProducer protocol for online data generation.\n use_data_producer: bool = False\n # Generate rollouts in a background thread while training on the previous rollout.\n async_prefetch: bool = False\n # Number of rollouts to prefetch ahead of training.\n prefetch_depth: int | None\n # Sync model weights to vLLM every N optimizer steps (async mode only).\n vllm_sync_interval: int | None\n # Score prompt groups incrementally instead of the full batch at once.\n streaming_partial_batch: bool | None\n # Minimum prompt groups to score per streaming chunk.\n streaming_min_groups: int | None\n # Apply IS correction for distribution mismatch between vLLM and training model.\n vllm_importance_sampling_correction: bool | None\n # IS mode: token_truncate, token_mask, sequence_truncate, or sequence_mask.\n vllm_importance_sampling_mode: Literal['token_truncate', 'token_mask', 'sequence_truncate', 'sequence_mask'] | None\n # Cap C for IS ratio clipping/masking.\n vllm_importance_sampling_cap: float | None\n # KL threshold for off-policy sequence masking (OPSM). None = disabled.\n off_policy_mask_threshold: float | None\n # Apply IS correction to KL divergence term.\n use_bias_correction_kl: bool | None\n\n # Number of persistent subprocess workers for parallel reward computation. Each worker\n # has its own main thread so signal.alarm() (used by math_verify) works correctly.\n # Work is sharded across workers by prompt groups. Only used with\n # use_data_producer=True and non-nn.Module reward functions.\n reward_num_workers: int = 1\n # [Experimental, disabled by default] Size of the replay buffer for storing high-\n # signal rollout groups. When > 0, groups with reward variance are cached and used to\n # replace zero-signal groups (where all rewards are identical). Set to 0 to disable.\n # Only used with use_data_producer=True.\n replay_buffer_size: int = 0\n # When True (default), recompute old_per_token_logps for replayed groups using the\n # current training model. This fixes the importance sampling mismatch that occurs when\n # replaying stale data. Only relevant when replay_buffer_size > 0.\n replay_recompute_logps: bool = True\n # Fraction of total training steps after which deferred re-rolling begins. 
Zero-signal\n # prompts (where all rewards in a group are identical) are buffered and re-injected\n # into later batches when the model is more likely to solve them. Set to 1.0 to\n # disable. Only used with use_data_producer=True.\n reroll_start_fraction: float = 1.0\n # Maximum number of prompt groups to replace with re-roll candidates per batch. Higher\n # values increase data utilization but reduce prompt diversity. Only used with\n # use_data_producer=True.\n reroll_max_groups: int = 1\n # When True, skip gradient computation for micro-batches where all advantages are zero\n # (no learning signal). This avoids the forward/backward pass entirely when no\n # learning signal is present. The step is logged with skipped_zero_adv_batches=1 for\n # monitoring.\n skip_zero_advantage_batches: bool = True\n # Sync LoRA adapter to vLLM via filesystem instead of merging + NCCL broadcast. Auto-\n # selects vllm_serve_lora serve module. Syncs only LoRA adapter weights vs full merged\n # model.\n vllm_lora_sync: bool = False\n\nvllm: VllmConfig | None\n # For VllmConfig:\n # Device to use for VLLM\n device: str | None = auto\n # Tensor parallel size for VLLM\n tensor_parallel_size: int | None\n # Data parallel size for VLLM\n data_parallel_size: int | None\n # GPU memory utilization for VLLM\n gpu_memory_utilization: float | None = 0.9\n # Data type for VLLM\n dtype: str | None = auto\n # Maximum length of the model context for VLLM\n max_model_len: int | None\n # Enable prefix caching for VLLM\n enable_prefix_caching: bool | None\n # Host for the vLLM server to start on\n host: str | None = 0.0.0.0\n # Port of the vLLM server to start on\n port: int | None = 8000\n\n # Enable reasoning for VLLM\n enable_reasoning: bool | None\n # Reasoning parser for VLLM\n reasoning_parser: str | None\n # Disable CUDA graph capture in vLLM. Required for models with causal_conv1d (e.g.,\n # Qwen3.5 hybrid linear attention).\n enforce_eager: bool | None\n # Python module for vLLM serve script. Set to 'axolotl.scripts.vllm_serve_lora' for\n # native LoRA support, or leave None for default TRL serve.\n serve_module: str | None\n # vLLM worker extension class for weight synchronization. 
Defaults to\n # 'trl.scripts.vllm_serve.WeightSyncWorkerExtension'.\n worker_extension_cls: str | None\n\n# Configuration for Energy-Based Fine-Tuning (EBFT)\nebft: EBFTConfig | None\n # For EBFTConfig:\n # Fractional layer depths for feature extraction (e.g., [0.25, 0.5, 0.75])\n feature_layers: list[float] = [0.25, 0.5, 0.75]\n # Embedding method: 'last_token', 'mean_pooling', 'completion_mean', or 'concat'\n embed_method: Literal['last_token', 'mean_pooling', 'completion_mean', 'concat'] = last_token\n # Apply SVD whitening to feature embeddings\n use_whitening: bool = False\n # Coefficient for alignment reward (cosine similarity with ground truth)\n alignment_coef: float = 1.0\n # Coefficient for diversity penalty (pairwise similarity between samples)\n diversity_coef: float = 1.0\n # Cross-entropy loss coefficient on ground-truth tokens\n ce_coef: float = 0.0\n # Set per-batch max_tokens based on ground-truth length\n adaptive_max_tokens: bool = True\n # Multiplier for ground-truth token count when computing adaptive max_tokens\n gt_length_multiplier: float = 1.5\n\n # EBFT mode: 'structured' (QA with vLLM) or 'strided' (unstructured text)\n mode: Literal['structured', 'strided'] = structured\n # Stride between anchor points (tokens)\n stride: int = 8\n # Context window size per block\n context_length: int = 8\n # Tokens to generate per block\n generate_max_len: int = 8\n # Independent rollouts per document\n n_samples_per_prompt: int = 4\n # Sampling temperature for strided generation\n temperature: float = 0.6\n # Top-p nucleus sampling threshold\n top_p: float = 1.0\n # RL policy gradient loss coefficient\n rl_coef: float = 1.0\n # Advantage estimator: 'rloo', 'group_norm', 'reinforce'\n advantage_estimator: Literal['rloo', 'group_norm', 'reinforce'] = rloo\n # Minimum tokens into completion before placing anchors. Skips anchors too close to\n # the prompt boundary where features are dominated by prompt context.\n min_completion_prefix: int = 0\n\nqat: QATConfig | None\n # For QATConfig:\n # Fake quantization layout to use for activation quantization.\n activation_dtype: TorchAOQuantDType | None\n # Fake quantization layout to use for weight quantization.\n weight_dtype: TorchAOQuantDType = TorchAOQuantDType.int8\n # Quantize embedding\n quantize_embedding: bool | None = False\n # The number of elements in each group for per-group fake quantization\n group_size: int | None = 32\n # The number of steps to apply fake quantization after\n fake_quant_after_n_steps: int | None\n\nquantization: PTQConfig | None\n # For PTQConfig:\n # Fake quantization layout to use for weight quantization.\n weight_dtype: TorchAOQuantDType = TorchAOQuantDType.int8\n # Fake quantization layout to use for activation quantization.\n activation_dtype: TorchAOQuantDType | None\n # Whether to quantize the embedding layer.\n quantize_embedding: bool | None\n # The number of elements in each group for per-group fake quantization\n group_size: int | None = 32\n\n# Reward modelling: `True` or `False`\nreward_model: bool | None\n\n# Configuration for dynamic checkpointing (trigger by file or signal). Set 'enabled:\n# true' to activate this feature.\ndynamic_checkpoint: DynamicCheckpointConfig | None\n # For DynamicCheckpointConfig:\n # Enable dynamic checkpoint triggering during training. Create a file\n # 'axolotl_checkpoint.save' in the configured `output_dir` to trigger.\n enabled: bool = False\n # Check for trigger file every N steps (reduces I/O overhead). 
Default: 10\n check_interval: int = 10\n # Custom trigger filename (optional). If not specified, defaults to\n # 'axolotl_checkpoint.save'. Specify a filename (not a full path) to override the\n # default.\n trigger_file_path: str = \n\n# Process reward modelling: `True` or `False`\nprocess_reward_model: bool | None\n# Coefficient to incentivize the reward model to output mean-zero rewards (proposed by\n# https://huggingface.co/papers/2312.09244, Eq. 2). Recommended value: `0.01`.\ncenter_rewards_coefficient: float | None\nnum_labels: int | None\n\n# Whether to perform weighting in DPO trainer\ndpo_use_weighting: bool | None\ndpo_label_smoothing: float | None\n# Precompute reference model log probabilities for DPO\nprecompute_ref_log_probs: bool | None\n\n# Whether to use Liger kernel for DPO loss.\ndpo_use_liger_kernel: bool | None\n\ndpo_padding_free: bool | None\n\n# List of DPO losses to use.\ndpo_loss_type: Annotated[list[str], MinLen(1)] | None\n\n# Weights for each DPO loss.\ndpo_loss_weights: Annotated[list[float], MinLen(1)] | None\n\n# A list of one or more datasets to finetune the model with\ndatasets: Annotated[list[SFTDataset | DPODataset | KTODataset | StepwiseSupervisedDataset | SyntheticDataset], MinLen(1)] | None\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. 
Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n # Key containing the reasoning trace (default: \"reasoning_content\").\n field_thinking: str | None\n # The key the chat template expects that indicates the reasoning trace.\n template_thinking_key: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. 
If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. This parameter is ignored for local datasets.\n revision: str | None\n\n # For DPODataset:\n path: str | None\n split: str | None\n type: UserDefinedDPOType | str | None\n # For UserDefinedDPOType:\n field_system: str | None\n field_prompt: str | None\n field_chosen: str | None\n field_rejected: str | None\n prompt_format: str | None\n chosen_format: str | None\n rejected_format: str | None\n data_files: list[str] | None\n revision: str | None\n field_messages: str | None\n\n # For KTODataset:\n path: str | None\n split: str | None\n type: UserDefinedKTOType | str | None\n # For UserDefinedKTOType:\n field_system: str | None\n field_prompt: str | None\n field_completion: str | None\n field_label: bool | None\n prompt_format: str | None\n completion_format: str | None\n data_files: list[str] | None\n trust_remote_code: bool | None = False\n revision: str | None\n\n # For StepwiseSupervisedDataset:\n path: str | None\n split: str | None\n data_files: list[str] | None\n revision: str | None\n step_separator: str | None\n max_completion_length: int | None\n train_on_last_step_only: bool | None\n\n # For SyntheticDataset:\n path: Literal['synthetic'] = synthetic\n type: Literal['_synthetic'] = _synthetic\n # Number of rows to generate\n length: int = 1000\n # Sequence length per row (defaults to sequence_len from config)\n sequence_length: int | None\n # Minimum token ID for generation\n min_input_id: int = 100\n # Maximum token ID for generation (defaults to tokenizer vocab_size)\n max_input_id: int | None\n # Random seed for reproducibility\n seed: int | None\n\n# A list of one or more datasets to eval the model with. You can use either\n# test_datasets, or val_set_size, but not both.\ntest_datasets: Annotated[list[SFTDataset | DPODataset | KTODataset | StepwiseSupervisedDataset | SyntheticDataset], MinLen(1)] | None\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 
'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n # Key containing the reasoning trace (default: \"reasoning_content\").\n field_thinking: str | None\n # The key the chat template expects that indicates the reasoning trace.\n template_thinking_key: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. 
The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. This parameter is ignored for local datasets.\n revision: str | None\n\n # For DPODataset:\n path: str | None\n split: str | None\n type: UserDefinedDPOType | str | None\n # For UserDefinedDPOType:\n field_system: str | None\n field_prompt: str | None\n field_chosen: str | None\n field_rejected: str | None\n prompt_format: str | None\n chosen_format: str | None\n rejected_format: str | None\n data_files: list[str] | None\n revision: str | None\n field_messages: str | None\n\n # For KTODataset:\n path: str | None\n split: str | None\n type: UserDefinedKTOType | str | None\n # For UserDefinedKTOType:\n field_system: str | None\n field_prompt: str | None\n field_completion: str | None\n field_label: bool | None\n prompt_format: str | None\n completion_format: str | None\n data_files: list[str] | None\n trust_remote_code: bool | None = False\n revision: str | None\n\n # For StepwiseSupervisedDataset:\n path: str | None\n split: str | None\n data_files: list[str] | None\n revision: str | None\n step_separator: str | None\n max_completion_length: int | None\n train_on_last_step_only: bool | None\n\n # For SyntheticDataset:\n path: Literal['synthetic'] = synthetic\n type: Literal['_synthetic'] = _synthetic\n # Number of rows to generate\n length: int = 1000\n # Sequence length per row (defaults to sequence_len from config)\n sequence_length: int | None\n # Minimum token ID for generation\n min_input_id: int = 100\n # Maximum token ID for generation (defaults to tokenizer vocab_size)\n max_input_id: int | None\n # Random seed for reproducibility\n seed: int | None\n\n# If false, the datasets will not be shuffled and will keep their original order in\n# `datasets`. 
The same applies to the `test_datasets` option and the\n# `pretraining_dataset` option. Default is true.\nshuffle_merged_datasets: bool | None = True\n# If true, each dataset in `datasets` will be shuffled before merging. This allows\n# curriculum learning strategies to be applied at the dataset level. Default is false.\nshuffle_before_merging_datasets: bool | None = False\n# Axolotl attempts to save the dataset as an arrow after packing the data together so\n# subsequent training attempts load faster, relative path\ndataset_prepared_path: str | None\n# Num shards for whole dataset\ndataset_shard_num: int | None\n# Index of shard to use for whole dataset\ndataset_shard_idx: int | None\nskip_prepare_dataset: bool | None = False\n# Number of shards to save the prepared dataset\nnum_dataset_shards_to_save: int | None\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize\npretraining_dataset: Annotated[list[PretrainingDataset | SFTDataset], MinLen(1)] | None\n # For PretrainingDataset:\n name: str | None\n path: str | None\n split: str | None = train\n text_column: str | None = text\n type: str | None = pretrain\n trust_remote_code: bool | None = False\n data_files: str | None\n skip: int | None\n\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. 
Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n # Key containing the reasoning trace (default: \"reasoning_content\").\n field_thinking: str | None\n # The key the chat template expects that indicates the reasoning trace.\n template_thinking_key: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. 
If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. This parameter is ignored for local datasets.\n revision: str | None\n\n# The maximum number of processes to use while preprocessing your input dataset. This\n# defaults to `os.cpu_count()` if not set. For Runpod VMs, it will default to number of\n# vCPUs via RUNPOD_CPU_COUNT.\ndataset_processes: int | None\n# The maximum number of processes to use while preprocessing your input dataset. This\n# defaults to `os.cpu_count()` if not set. For Runpod VMs, it will default to number of\n# vCPUs via RUNPOD_CPU_COUNT.\ndataset_num_proc: int | None\n\n# Deduplicates datasets and test_datasets with identical entries\ndataset_exact_deduplication: bool | None\n# Keep dataset in memory while preprocessing. Only needed if cached dataset is taking\n# too much storage\ndataset_keep_in_memory: bool | None\ndataloader_pin_memory: bool | None\ndataloader_num_workers: int | None\ndataloader_prefetch_factor: int | None\ndataloader_drop_last: bool | None\n\naccelerator_config: dict[str, Any] | None\n\nremove_unused_columns: bool | None\n\n# Push prepared dataset to hub - repo_org/repo_name\npush_dataset_to_hub: str | None\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private\n# datasets. Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: bool | None\n\ndevice: Any | None\n# Passed through to transformers when loading the model when launched without\n# accelerate. Use `sequential` when training w/ model parallelism to limit memory\ndevice_map: Any | None\nworld_size: int | None\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank: int | None\nddp: bool | None\n\n# Seed for reproducibility\nseed: int | None\n# Advanced DDP Arguments - timeout\nddp_timeout: int | None\n# Advanced DDP Arguments - bucket cap in MB\nddp_bucket_cap_mb: int | None\n# Advanced DDP Arguments - broadcast buffers\nddp_broadcast_buffers: bool | None\nddp_find_unused_parameters: bool | None\n\n# Whether to run causal language model evaluation for metrics in\n# `eval_causal_lm_metrics`\ndo_causal_lm_eval: bool | None\n# HF evaluate metrics used during evaluation. Default is ['sacrebleu', 'comet', 'ter',\n# 'chrf', 'perplexity']\neval_causal_lm_metrics: list[str] | None\ndo_bench_eval: bool | None\nbench_dataset: str | None\nbench_split: str | None\nmetric_for_best_model: str | None\ngreater_is_better: bool | None\n\n# High loss value, indicating the learning has broken down (a good estimate is ~2 times\n# the loss at the start of training)\nloss_watchdog_threshold: float | None\n# Number of high-loss steps in a row before the trainer aborts (default: 3)\nloss_watchdog_patience: int | None\n\n# Run garbage collection every `gc_steps` steps. -1 will run on epoch end and before\n# evaluations. Default is 0 (disabled).\ngc_steps: int | None\n\n# Use CUDA bf16. bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection.\n# require >=ampere\nbf16: Literal['auto'] | bool | None = auto\n# Use CUDA fp16\nfp16: bool | None\n# Enable FP8 mixed precision training using TorchAO. 
Best used in combination with\n# torch.compile.\nfp8: bool | None\n# Enable FSDP float8 all-gather optimization for FP8 training. Can improve training\n# speed by 10-15% when FSDP is enabled.\nfp8_enable_fsdp_float8_all_gather: bool | None\n# No AMP (automatic mixed precision) - require >=ampere\nbfloat16: bool | None\n# No AMP (automatic mixed precision)\nfloat16: bool | None\n# bool to use CUDA tf32 or 'auto' for automatic detection - require >=ampere\ntf32: Literal['auto'] | bool | None = auto\nfloat32: bool | None\n\n# Whether to use gradient checkpointing. Available options are: true, false, 'offload',\n# 'offload_disk'.\n# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: Literal['offload', 'offload_disk'] | bool | None = False\n# Additional kwargs to pass to the trainer for gradient checkpointing\ngradient_checkpointing_kwargs: dict[str, Any] | None\n# Whether to offload activations. Available options are: true, false, 'legacy', 'disk'.\nactivation_offloading: Literal['legacy', 'disk'] | bool | None = False\n# Offload model layer parameters to CPU during forward, prefetch back during backward.\nlayer_offloading: bool | None = False\n\n# Freeze multimodal encoder parameters (vision, audio, etc.) for text-only training of\n# multimodal models. When True, parameters belonging to vision towers, audio towers,\n# multimodal projectors, and similar non-language modules are frozen\n# (requires_grad=False). This allows DDP training without\n# ddp_find_unused_parameters=True.\nfreeze_mm_modules: bool | None\n\n# List of regex patterns for parameter names to keep unfrozen. All other parameters will\n# be frozen via requires_grad=False. Note: range-based patterns (e.g.\n# embed_tokens.weight$[:32000]) use gradient zeroing rather than a true freeze, so\n# weight decay will still apply to the frozen portion and optimizer states are allocated\n# for the full parameter.\nunfrozen_parameters: list[str] | None\n\n# The maximum length of an input to train with; this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: int = 512\n# What to do when a tokenized row exceeds sequence_len. 'drop' removes the row;\n# 'truncate' slices tensors to sequence_len; 'raise' raises a ValueError. Defaults to\n# 'drop' for backward compatibility.\nexcess_length_strategy: Literal['drop', 'truncate', 'raise'] | None\n# The maximum length of an input for evaluation. If not specified, defaults to\n# sequence_len\neval_sequence_len: int | None\nmin_sample_len: int | None\n# maximum prompt length for RL training\nmax_prompt_len: int | None\n# Use efficient multi-packing with block diagonal attention and per sequence\n# position_ids. Recommended to set to 'true'\nsample_packing: bool | None\n# The number of samples packed at a time. Increasing the following values helps with\n# packing, but usually only slightly (<1%).\nsample_packing_group_size: int | None = 100000\n# The number of samples which can be packed into one sequence. Increase if using a large\n# sequence_len with many short samples.\nsample_packing_bin_size: int | None = 200\n# Whether to pack samples sequentially\nsample_packing_sequentially: bool | None\n# The multiprocessing start method to use for packing. Should be 'fork', 'spawn' or\n# 'forkserver'\nsample_packing_mp_start_method: str | None\n# Set to 'false' if getting errors during eval with sample_packing on\neval_sample_packing: bool | None\n# Pad inputs so each step uses constant sized buffers. 
This will reduce memory\n# fragmentation and may prevent OOMs by re-using memory more efficiently. Defaults to\n# True if `sample_packing` enabled\npad_to_sequence_len: bool | None\n# Whether to use sequential sampling for curriculum learning\ncurriculum_sampling: bool | None\nmultipack_real_batches: bool | None\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening: Literal['auto'] | bool | None\n\nuse_pose: bool | None\npose_split_on_token_ids: list[int] | None\npose_max_context_len: int | None\npose_num_chunks: int | None\n\npretrain_multipack_buffer_size: int | None\n# whether to prevent cross attention for packed sequences during pretraining\npretrain_multipack_attn: bool | None = True\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation: bool | None\n\n# Use streaming mode for loading datasets\nstreaming: bool | None\n# Buffer size for multipack streaming datasets\nstreaming_multipack_buffer_size: int | None = 10000\n\n# Whether to use xformers attention patch https://github.com/facebookresearch/xformers\nxformers_attention: bool | None\n# Whether to use scaled-dot-product attention https://pytorch.org/docs/stable/generated/\n# torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention: bool | None\n# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention: bool | None\nflex_attention: bool | None\nflex_attn_compile_kwargs: dict[str, Any] | None\n# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention\nflash_attention: bool | None\n# Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_cross_entropy: bool | None\n# Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_rms_norm: bool | None\n# Whether to fuse part of the MLP into a single operation\nflash_attn_fuse_mlp: bool | None\n# Whether to use BetterTransformer\nflash_optimum: bool | None\n# Whether to use SageAttention https://github.com/thu-ml/SageAttention\nsage_attention: bool | None\n\neager_attention: bool | None\n\n# Specify a custom attention implementation, used mostly for kernels.\nattn_implementation: str | None\n\n# Use hybrid attention for Gemma 4: flash_attention_2 for sliding window layers and sdpa\n# for global (full_attention) layers. Global layers have head_dim=512 which exceeds\n# flash attention's supported size.\ngemma4_hybrid_attn_impl: bool | None\n\n# Which experts implementation to use for MoE models.\nexperts_implementation: str | None\n\n# Quantize MoE expert weights on load to reduce VRAM. Requires adapter (lora/qlora) with\n# load_in_4bit or load_in_8bit. Requires CUDA (not compatible with ROCm or other\n# backends). Note: total parameter count may be reported incorrectly when enabled\n# (trainable param count is correct).\nquantize_moe_experts: bool = False\n\n# Whether to use Scaled Softmax (SSMax) attention. Ref: https://arxiv.org/abs/2501.19399\nscaling_softmax: bool | None\n# Scaling factor for SSMax attention. Default is 0.43\nscaling_softmax_factor: float | None\n# Bias for SSMax attention. Default is 0.0. Note: The paper recommends bias=0 for better\n# length generalization.\nscaling_softmax_bias: float | None\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. 
See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_mlp_kernel: bool | None\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_qkv_kernel: bool | None\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_o_kernel: bool | None\n# Apply custom LoRA autograd function for embedding layers. See:\n# https://docs.axolotl.ai/docs/lora_optims.html\nlora_embedding_kernel: bool | None\n\n# Whether to use chunked cross entropy loss for memory efficiency\nchunked_cross_entropy: bool | None\n# Number of chunks to use for chunked cross entropy loss\nchunked_cross_entropy_num_chunks: int | None\n# Enable Entropy-Aware Focal Training loss (EAFT)\nuse_eaft: bool | None\n# Exponent for entropy weighting in EAFT (default: 1.0)\neaft_alpha: float | None = 1.0\n# Number of top logits for entropy approximation (default: 20)\neaft_k: int | None = 20\n\n# Whether to use ALST tiled mlp for memory efficient long context\ntiled_mlp: bool | None\n\n# Number of shards to use for ALST tiled mlp. If unset, it will be set based on\n# seqlen/hidden_size\ntiled_mlp_num_shards: int | None\n\n# Whether to use original mlp for ALST tiled mlp. Otherwise uses a generic MLP based on\n# llama.\ntiled_mlp_use_original_mlp: bool | None = True\n\nllama4_linearized_experts: bool | None\n\n# Deepspeed config path. e.g., deepspeed_configs/zero3.json\ndeepspeed: str | dict[str, Any] | None\n# Whether to use deepcompile for faster training with deepspeed\ndeepcompile: bool | None\n# FSDP configuration\nfsdp: list[str] | None\n\n# FSDP configuration options\nfsdp_config: FSDPConfig | None\n # For FSDPConfig:\n # FSDP version\n fsdp_version: int | None\n # Enable activation checkpointing to reduce memory usage during forward passes\n activation_checkpointing: bool | None\n # Offload parameters to CPU to reduce GPU memory usage\n offload_params: bool | None\n # Synchronize module states across all processes\n sync_module_states: bool | None\n # Enable CPU RAM efficient loading to reduce memory usage during model loading\n cpu_ram_efficient_loading: bool | None\n # Disabling this enables swap memory usage for resource-constrained setups when\n # offload_params is enabled.\n cpu_offload_pin_memory: bool | None\n # Use original parameters instead of flattened parameters\n use_orig_params: bool | None\n\n # Type of state dict to use for saving/loading checkpoints\n state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None\n # Final state dict type to use after training completion\n final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None\n\n # Policy for automatically wrapping modules with FSDP\n auto_wrap_policy: Literal['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP'] | None\n # Class name of transformer layers to wrap (e.g., 'LlamaDecoderLayer')\n transformer_layer_cls_to_wrap: str | None\n\n # Reshard parameters after forward pass to save memory\n reshard_after_forward: bool | None\n # Mixed precision policy for FSDP (e.g., 'fp16', 'bf16')\n mixed_precision_policy: str | None\n\n# FSDP version\nfsdp_version: int | None\nfsdp_final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None\n\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 
0 for\n# no eval.\nval_set_size: float | None = 0.0\n\n# Number of devices to shard across. If not set, will use all available devices.\ndp_shard_size: int | None\n# Number of devices to replicate across.\ndp_replicate_size: int | None\n# Deprecated: use `context_parallel_size` instead\nsequence_parallel_degree: int | None\n# Set to a divisor of the number of GPUs available to split sequences into chunks of\n# equal size. Use in long context training to prevent OOM when sequences cannot fit into\n# a single GPU's VRAM. E.g., if 4 GPUs are available, set this value to 2 to split each\n# sequence into two equal-sized subsequences, or set to 4 to split into four equal-sized\n# subsequences. See https://docs.axolotl.ai/docs/sequence_parallelism.html for more\n# details.\ncontext_parallel_size: int | None\n# Optional; strides across the key dimension. Larger values use more memory but should\n# make training faster. Must evenly divide the number of KV heads in your model.\nheads_k_stride: int | None\n# One of 'varlen_llama3', 'batch_ring', 'batch_zigzag', 'batch_stripe'. Defaults to\n# 'varlen_llama3' in the sample packing case, and 'batch_ring' in the non-sample packing\n# case.\nring_attn_func: RingAttnFunc | None\n# Number of tensor parallel processes in TP group. Only supported with DeepSpeed AutoTP.\ntensor_parallel_size: int | None\n\n# Add or change special tokens. If you add tokens here, you don't need to add them to\n# the `tokens` list.\nspecial_tokens: SpecialTokensConfig | None\n # For SpecialTokensConfig:\n bos_token: str | None\n eos_token: str | None\n pad_token: str | None\n unk_token: str | None\n additional_special_tokens: list[str] | None\n\n# Add extra tokens to the tokenizer\ntokens: list[str] | None\n# Mapping token_id to new_token_string to override reserved added_tokens in the\n# tokenizer. Only works for tokens that are not part of the base vocab (aka are\n# added_tokens). Can be checked if they exist in tokenizer.json added_tokens.\nadded_tokens_overrides: dict[int, str] | None\n\n# Whether to use torch.compile and which backend to use. Setting to `auto` will enable\n# torch compile when torch>=2.6.0\ntorch_compile: Literal['auto'] | bool | None\n# Backend to use for torch.compile\ntorch_compile_backend: str | None\ntorch_compile_mode: Literal['default', 'reduce-overhead', 'max-autotune'] | None\n\n# Maximum number of iterations to train for. It takes precedence over num_epochs, which\n# means that if both are set, num_epochs will not be guaranteed. e.g., when 1 epoch is\n# 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps: int | None\n# Number of warmup steps. Cannot use with warmup_ratio\nwarmup_steps: int | None\n# Warmup ratio. Cannot use with warmup_steps\nwarmup_ratio: float | None\n# Leave empty to eval at each epoch, integer for every N steps, float for fraction of\n# total steps\neval_steps: int | float | None\n# Number of times per epoch to run evals, mutually exclusive with eval_steps\nevals_per_epoch: int | None\n# Set to `no` to skip evaluation, `epoch` at end of each epoch, leave empty to infer\n# from `eval_steps`\neval_strategy: str | None\n\n# Leave empty to save at each epoch, integer for every N steps, 
float for fraction of\n# total steps\nsave_steps: int | float | None\n# Number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsaves_per_epoch: int | None\n# Set to `no` to skip checkpoint saves, `epoch` at end of each epoch, `best` when better\n# result is achieved, leave empty to infer from `save_steps`\nsave_strategy: str | None\n# Maximum number of checkpoints saved at a time\nsave_total_limit: int | None\n# Whether to checkpoint a model after the first step of training. Defaults to False.\nsave_first_step: bool | None\n\n# Logging frequency\nlogging_steps: int | None\n# Stop training after this many evaluation losses have increased in a row.\n# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback\nearly_stopping_patience: int | None\nload_best_model_at_end: bool | None = False\n# Save only the model weights, skipping the optimizer. Using this means you can't resume\n# from checkpoints.\nsave_only_model: bool | None = False\n# Use tensorboard for logging\nuse_tensorboard: bool | None\n# Enable the pytorch profiler to capture the first N steps of training to the\n# output_dir. See https://pytorch.org/blog/understanding-gpu-memory-1/ for more\n# information. Snapshots can be visualized @ https://pytorch.org/memory_viz\nprofiler_steps: int | None\n# Which step to start the profiler at. Useful for only capturing a few steps mid-run.\nprofiler_steps_start: int | None = 0\n# bool of whether to report tokens per second at the end of training. This is not\n# supported with pre-training datasets.\ninclude_tokens_per_second: bool | None\n# bool of whether to report tokens per second per-gpu during training by measuring\n# throughput of non-padding tokens.\ninclude_tkps: bool | None = True\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to\n# add noise to embeddings. Currently only supported on Llama and Mistral\nneftune_noise_alpha: float | None\n\n# Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to\n# `beta` in `ORPOConfig` due to trl mapping.\norpo_alpha: float | None\n# Target reward margin for the SimPO loss\nsimpo_gamma: float | None\n# Weight of the BC regularizer\ncpo_alpha: float | None\n\n# Factor for desirable loss term in KTO loss\nkto_desirable_weight: float | None\n# Factor for undesirable loss term in KTO loss\nkto_undesirable_weight: float | None\n# The beta parameter for the RL training\nrl_beta: float | None\n\n# Defines the max memory usage per gpu on the system. Passed through to transformers\n# when loading the model.\nmax_memory: dict[int | Literal['cpu', 'disk'], int | str] | None\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in\n# gigabytes); default: unset\ngpu_memory_limit: int | str | None\n# Whether to use low_cpu_mem_usage\nlow_cpu_mem_usage: bool | None\n\n# The name of the chat template to use for training; the following values are supported:\n# tokenizer_default: Uses the chat template that is available in the\n# tokenizer_config.json. If the chat template is not available in the tokenizer, it will\n# raise an error. This is the default value.\n# alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n# are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n# tokenizer_default_fallback_*: where * is the name of the chat template to fall back to.\n# E.g. tokenizer_default_fallback_chatml. 
This is useful when the chat template is not\n# available in the tokenizer. jinja: Uses a custom jinja template for the chat template.\n# The custom jinja template should be provided in the chat_template_jinja field. The\n# selected chat template will be saved to the tokenizer_config.json for easier\n# inference\nchat_template: ChatTemplate | Annotated[str, StringConstraints(pattern='^tokenizer_default_fallback_')] | None\n# Custom jinja template or path to jinja file for chat template. This will only be used\n# if chat_template is set to `jinja` or `null` (in which case chat_template is\n# automatically set to `jinja`). Default is null.\nchat_template_jinja: str | None\n# Additional kwargs to pass to the chat template. This is useful for customizing the\n# chat template. For example, you can pass `thinking=False` to add a generation prompt\n# to the chat template.\nchat_template_kwargs: dict[str, Any] | None\n# Custom EOT (End-of-Turn) tokens to mask/unmask during training. These tokens mark the\n# boundaries between conversation turns. For example: ['/INST', '</s>',\n# '[/SYSTEM_PROMPT]']. If not specified, defaults to just the model's eos_token. This is\n# useful for templates that use multiple delimiter tokens.\neot_tokens: list[str] | None\n# Changes the default system message. Currently only supports chatml.\ndefault_system_message: str | None\n\n# Token index or indices to adjust embedding weights to the mean of the other tokens.\n# This is useful when the model has untrained embeddings.\nfix_untrained_tokens: int | list[int] | None\n\nis_preprocess: bool | None\npreprocess_iterable: bool | None\n\n# Total number of tokens - internal use\ntotal_num_tokens: int | None\ntotal_supervised_tokens: int | None\n# You can set these packing optimizations AFTER starting a training run at least once.\n# The trainer will provide recommended values for these settings.\nsample_packing_eff_est: float | None\naxolotl_config_path: str | None\n\n# Internal use only - Used to identify what the model is based on\nis_falcon_derived_model: bool | None\n# Internal use only - Used to identify what the model is based on\nis_llama_derived_model: bool | None\n# Internal use only - Used to identify what the model is based on. Please note that if\n# you set this to true, `padding_side` will be set to 'left' by default\nis_mistral_derived_model: bool | None\n# Internal use only - Used to identify what the model is based on\nis_qwen_derived_model: bool | None\n\n# Add plugins to extend the pipeline. See `src/axolotl/integrations` for the available\n# plugins or the doc below for more details.\n# https://docs.axolotl.ai/docs/custom_integrations.html\nplugins: list[str] | None\n# Enable sample generation during training for monitoring\ngenerate_samples: bool | None = False\n# Number of samples to generate at each interval\nnum_generation_samples: int | None = 3\n# Maximum new tokens to generate per sample\ngeneration_max_new_tokens: int | None = 50\n# Temperature for sample generation (0.0 = greedy)\ngeneration_temperature: float | None = 0.7\n# Nucleus sampling parameter for generation\ngeneration_top_p: float | None\n# Top-k sampling parameter for generation\ngeneration_top_k: int | None\n# Ratio of input to use as prompt (0.0-1.0)\ngeneration_prompt_ratio: float | None = 0.5\n# Whether to use sampling (vs greedy decoding)\ngeneration_do_sample: bool | None = True\n\n# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files. 
This\n# can also be a relative path to a model on disk\nbase_model: str (required)\n# If the base_model repo on hf hub doesn't include configuration .json files, you can\n# set that here, or leave this empty to default to base_model\nbase_model_config: str | None\n# transformers config class (e.g., 'LlamaConfig', 'MistralConfig'). Defaults to\n# AutoConfig.\ncls_model_config: str | None\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one defined in the base model\ntokenizer_config: str | None\n# use_fast option for tokenizer loading from_pretrained, defaults to True\ntokenizer_use_fast: bool | None\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy: bool | None\n# Whether to use the mistral-common tokenizer. If set to True, it will use the mistral-\n# common tokenizer.\ntokenizer_use_mistral_common: bool | None\n# Corresponding tokenizer for the model. AutoTokenizer is a good choice\ntokenizer_type: str | None\n# transformers processor class\nprocessor_type: str | None\n# kwargs forwarded to the processor's from_pretrained(), overriding processor config\n# (e.g. image_seq_length, min_pixels, etc.).\nprocessor_kwargs: dict[str, Any] | None\n# Whether to save jinja files for tokenizer, transformers default is True\ntokenizer_save_jinja_files: bool | None = True\n# Trust remote code for untrusted source\ntrust_remote_code: bool | None\n\n# Don't move the model to the device before sharding. Set to `false` to revert to legacy\n# behavior.\nexperimental_skip_move_to_device: bool | None = True\n\n# Use custom kernels, e.g. MegaBlocks.\nuse_kernels: bool | None\n\n# Model loading quantization config\nmodel_quantization_config: Literal['Mxfp4Config', 'FineGrainedFP8Config'] | None\n# kwargs for model quantization config\nmodel_quantization_config_kwargs: dict[str, Any] | None\n\n# Where to save the full-finetuned model to\noutput_dir: str = ./model-out\n# push checkpoints to hub\nhub_model_id: str | None\n# how to push checkpoints to hub\nhub_strategy: str | None\n# branch/revision to push to on hub (default: main)\nhub_revision: str | None\n# Whether to save the model using safetensors format. Defaults to True.\nsave_safetensors: bool | None = True\n\n# This will attempt to quantize the model down to 8 bits and use the adam 8-bit optimizer\nload_in_8bit: bool | None = False\n# Use bitsandbytes 4 bit\nload_in_4bit: bool | None = False\n\n# Set to 'lora', 'qlora', or 'llama-adapter', or leave blank to train all parameters of\n# the original model\nadapter: Literal['lora', 'qlora', 'llama-adapter'] | None\n# If you already have a lora model trained that you want to load, put that here. This\n# means after training, if you want to test the model, you should set this to the value\n# of `output_dir`. Note that if you merge an adapter to the base model, a new\n# subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir: str | None\nlora_r: int | None\nlora_alpha: int | None\nlora_fan_in_fan_out: bool | None\nlora_target_modules: str | list[str] | None\nlora_target_parameters: str | list[str] | None\n# If true, will target all linear modules\nlora_target_linear: bool | None\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules\n# because they need to know the new tokens. For LLaMA and Mistral, you need to save\n# `embed_tokens` and `lm_head`. It may vary for other models. 
`embed_tokens` converts\n# tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\nlora_modules_to_save: list[str] | None\nlora_dropout: float | None = 0.0\n# The layer indices to transform; otherwise, applies to all layers\npeft_layers_to_transform: list[int] | None\npeft_layers_pattern: list[str] | None\n\npeft: PeftConfig | None\n # For PeftConfig:\n # Configuration options for loftq initialization for LoRA\n loftq_config: LoftQConfig | None\n # For LoftQConfig:\n # typically 4 bits\n loftq_bits: int = 4\n\n# Whether to use DoRA.\npeft_use_dora: bool | None\n# Whether to use RSLoRA.\npeft_use_rslora: bool | None\n# List of layer indices to replicate.\npeft_layer_replication: list[tuple[int, int]] | None\n# How to initialize LoRA weights. Defaults to True, which is the original Microsoft\n# implementation.\npeft_init_lora_weights: bool | str | None\n# A list of token indices to fine-tune on the `embed_tokens` layer. Otherwise, a dict\n# mapping an embedding layer name to its trainable token indices. See\n# https://huggingface.co/docs/peft/v0.17.0/en/developer_guides/lora#efficiently-train-\n# tokens-alongside-lora\npeft_trainable_token_indices: list[int] | dict[str, list[int]] | None\n# Whether to tie adapter weights for tied model weights. See\n# https://github.com/huggingface/peft/issues/2864\npeft_ensure_weight_tying: bool | None\n# Whether to upcast the LoRA adapter to fp32. This is enabled by default in PEFT.\npeft_autocast_adapter_dtype: bool | None\n\n# load qlora model in sharded format for FSDP using answer.ai technique.\nqlora_sharded_model_loading: bool | None = False\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it\n# takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge\nlora_on_cpu: bool | None\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: bool | None\n# optional overrides to the bnb 4bit quantization configuration\nbnb_config_kwargs: dict[str, Any] | None\n\n# loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.\nloraplus_lr_ratio: float | None\n# loraplus learning rate for lora embedding layers. Default value is 1e-6.\nloraplus_lr_embedding: float | None = 1e-06\n\nmerge_lora: bool | None\n# Method to use for LoRA merging. 'memory_efficient' (default) processes shards\n# individually to reduce memory usage, 'legacy' loads the full model into memory.\nmerge_method: Literal['legacy', 'memory_efficient'] | None = memory_efficient\n\n# Whether to use ReLoRA. Use with jagged_restart_*steps options.\nrelora: bool | None\n# threshold for optimizer magnitude when pruning\nrelora_prune_ratio: float | None\n# True to perform lora weight merges on cpu during restarts, for modest gpu memory\n# savings\nrelora_cpu_offload: bool | None\n\n# how often to reset for jagged restarts\njagged_restart_steps: int | None\n# how many warmup steps to take after reset for jagged restarts\njagged_restart_warmup_steps: int | None\n# how many anneal steps to take before reset for jagged restarts\njagged_restart_anneal_steps: int | None\n\n# If greater than 1, backpropagation will be skipped and the gradients will be\n# accumulated for the given number of steps.\ngradient_accumulation_steps: int | None = 1\n# The number of samples to include in each batch. This is the number of samples sent to\n# each GPU. 
Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: int | None = 1\n# Total batch size; we do not recommend setting this manually\nbatch_size: int | None\n# per gpu micro batch size for evals, defaults to value of micro_batch_size\neval_batch_size: int | None\n\n# whether to find a batch size that fits in memory. Passed to the underlying\n# transformers Trainer\nauto_find_batch_size: bool | None\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: bool | None = False\n# Group similarly sized data to minimize padding. May be slower to start, as it must\n# download and sort the entire dataset. Note that training loss may have an oscillating\n# pattern with this enabled.\ngroup_by_length: bool | None\n\nlearning_rate: str | float (required)\nembedding_lr: float | None\nembedding_lr_scale: float | None\n# Specify weight decay\nweight_decay: float | None = 0.0\n# Specify optimizer\noptimizer: OptimizerNames | CustomSupportedOptimizers | None = OptimizerNames.ADAMW_TORCH_FUSED\n# Dictionary of arguments to pass to the optimizer\noptim_args: str | dict[str, Any] | None\n# The target modules to optimize, i.e. the module names that you would like to train;\n# right now this is used only for the GaLore algorithm\noptim_target_modules: list[str] | Literal['all_linear'] | None\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path: str | None\nlr_scheduler: SchedulerType | Literal['one_cycle'] | Literal['rex'] | None = SchedulerType.COSINE\n# Specify a scheduler and kwargs to use with the optimizer\nlr_scheduler_kwargs: dict[str, Any] | None\nlr_quadratic_warmup: bool | None\n# decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of\n# peak lr\ncosine_min_lr_ratio: float | None\n# freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means\n# start cosine_min_lr at 80% of training step\ncosine_constant_lr_ratio: float | None\n# Learning rate div factor\nlr_div_factor: float | None\n\nlr_groups: list[LrGroup] | None\n # For LrGroup:\n name: str (required)\n modules: list[str] (required)\n lr: float (required)\n\n# adamw hyperparams\nadam_epsilon: float | None\n# only used for CAME Optimizer\nadam_epsilon2: float | None\n# adamw hyperparams\nadam_beta1: float | None\n# adamw hyperparams\nadam_beta2: float | None\n# only used for CAME Optimizer\nadam_beta3: float | None\n\n# Dion Optimizer learning rate\ndion_lr: float | None\n# Dion Optimizer momentum\ndion_momentum: float | None\n# Dion Optimizer: r/d fraction for low-rank approximation. Used to compute the low-rank\n# dimension.\ndion_rank_fraction: float | None = 1.0\n# Dion Optimizer: Round up the low-rank dimension to a multiple of this number. 
This may\n# be useful to ensure even sharding.\ndion_rank_multiple_of: int | None = 1\n\n# Gradient clipping max norm\nmax_grad_norm: float | None\nnum_epochs: float = 1.0\n\nuse_wandb: bool | None\n# Set the name of your wandb run\nwandb_name: str | None\n# Set the ID of your wandb run\nwandb_run_id: str | None\n# \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn\n# off wandb\nwandb_mode: str | None\n# Your wandb project name\nwandb_project: str | None\n# A wandb Team name if using a Team\nwandb_entity: str | None\nwandb_watch: str | None\n# \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only\n# at the end of training\nwandb_log_model: str | None\n\nuse_mlflow: bool | None\n# URI to mlflow\nmlflow_tracking_uri: str | None\n# Your experiment name\nmlflow_experiment_name: str | None\n# Your run name\nmlflow_run_name: str | None\n# set to true to copy each saved checkpoint on each save to mlflow artifact registry\nhf_mlflow_log_artifacts: bool | None\n\n# Enable or disable Comet integration.\nuse_comet: bool | None\n# API key for Comet. Recommended to set via `comet login`.\ncomet_api_key: str | None\n# Workspace name in Comet. Defaults to the user's default workspace.\ncomet_workspace: str | None\n# Project name in Comet. Defaults to Uncategorized.\ncomet_project_name: str | None\n# Identifier for the experiment. Used to append data to an existing experiment or\n# control the key of new experiments. Defaults to a random key.\ncomet_experiment_key: str | None\n# Create a new experiment (\"create\") or log to an existing one (\"get\"). Default\n# (\"get_or_create\") auto-selects based on configuration.\ncomet_mode: str | None\n# Set to True to log data to Comet server, or False for offline storage. Default is\n# True.\ncomet_online: bool | None\n# Dictionary for additional configuration settings, see the doc for more details.\ncomet_experiment_config: dict[str, Any] | None\n\nuse_trackio: bool | None\n# Your trackio project name\ntrackio_project_name: str | None\n# Set the name of your trackio run\ntrackio_run_name: str | None\n# Hugging Face Space ID to sync dashboard to (optional, runs locally if not provided)\ntrackio_space_id: str | None\n\n# Enable OpenTelemetry metrics collection and Prometheus export\nuse_otel_metrics: bool | None = False\n# Host to bind the OpenTelemetry metrics server to\notel_metrics_host: str | None = localhost\n# Port for the Prometheus metrics HTTP server\notel_metrics_port: int | None = 8000\n\n# the number of active layers in LISA\nlisa_n_layers: int | None\n# how often to switch layers in LISA\nlisa_step_interval: int | None\n# path under the model to access the layers\nlisa_layers_attribute: str | None = model.layers\n\ngradio_title: str | None\ngradio_share: bool | None\ngradio_server_name: str | None\ngradio_server_port: int | None\ngradio_max_new_tokens: int | None\ngradio_temperature: float | None\n\nuse_ray: bool = False\nray_run_name: str | None\nray_num_workers: int = 1\nresources_per_worker: dict\n\n# The size of the image to resize to. It can be an integer (resized into padded-square\n# image) or a tuple (width, height). If not provided, we will attempt to load from\n# preprocessor.size; otherwise, images won't be resized.\nimage_size: int | tuple[int, int] | None\n# The resampling algorithm to use for image resizing. Default is bilinear. 
Please refer\n# to PIL.Image.Resampling for more details.\nimage_resize_algorithm: Literal['bilinear', 'bicubic', 'lanczos'] | Resampling | None\n\n# optional overrides to the base model configuration\noverrides_of_model_config: dict[str, Any] | None\n# optional overrides to the base model loading from_pretrained\noverrides_of_model_kwargs: dict[str, Any] | None\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good\n# choice too\ntype_of_model: str | None\n# You can choose a specific model revision from huggingface hub\nrevision_of_model: str | None\n\nmax_packed_sequence_len: int | None\nrope_scaling: Any | None\nnoisy_embedding_alpha: float | None\ndpo_beta: float | None\nevaluation_strategy: str | None\neval_table_size: int | None\neval_max_new_tokens: int | None\ndpo_use_logits_to_keep: bool | None\ndpo_generate_during_eval: bool | None\ndpo_norm_loss: bool | None\nrpo_alpha: float | None",
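To make the scheduling and checkpointing fields documented above concrete, here is a minimal sketch of how they combine in a training YAML; the keys are taken from the schema above, while every value is illustrative only:

    max_steps: 1000        # hard cap on steps; takes precedence over num_epochs
    warmup_ratio: 0.03     # cannot be combined with warmup_steps
    evals_per_epoch: 4     # mutually exclusive with eval_steps
    saves_per_epoch: 2     # mutually exclusive with save_steps
    save_total_limit: 3    # keep at most three checkpoints at a time
    logging_steps: 10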
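The adapter fields above compose in the same way; a minimal QLoRA sketch, again with illustrative values only:

    load_in_4bit: true        # bitsandbytes 4-bit base weights
    adapter: qlora
    lora_r: 32
    lora_alpha: 16
    lora_dropout: 0.05
    lora_target_linear: true  # target all linear modules
    lora_modules_to_save:     # save these if new tokens were added to the tokenizer
      - embed_tokens
      - lm_head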
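Likewise, the nested LrGroup schema maps onto a list of named parameter groups; in this sketch the group name and module list are hypothetical, chosen only to show the shape:

    learning_rate: 2.0e-5
    lr_groups:
      - name: embeddings     # hypothetical group label
        modules:
          - embed_tokens
          - lm_head
        lr: 1.0e-5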
+ "text": "# Allow overwrite yml config using from cli\nstrict: bool | None = False\n# Resume from a specific checkpoint dir\nresume_from_checkpoint: str | None\n# If resume_from_checkpoint isn't set and you simply want it to start where it left off.\n# Be careful with this being turned on between different models.\nauto_resume_from_checkpoints: bool | None\n# Resize the model embeddings when new tokens are added to multiples of 32. This is\n# reported to improve training speed on some models\nresize_token_embeddings_to_32x: bool | None\nmean_resizing_embeddings: bool | None = False\n\n# Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.\nshrink_embeddings: bool | None\n# Don't upcast the embeddings to float32 when using PEFT. Useful for low-VRAM GPUs\nembeddings_skip_upcast: bool | None\n# Reinitialize model weights randomly instead of loading pretrained weights\nreinit_weights: bool | None\n\n# module to custom trainer class to use for training\ntrainer_cls: str | None\n\n# Use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo', 'ebft'\nrl: RLType | None\n\ntrl: TRLConfig | None\n # For TRLConfig:\n # Beta parameter for the RL training. Same as `rl_beta`. Use\n beta: float | None\n # Maximum length of the completion for RL training.\n max_completion_length: int | None\n\n # Whether to use VLLM for RL training.\n use_vllm: bool = False\n # VLLM mode to use, one of 'server' or 'colocate'\n vllm_mode: Literal['server', 'colocate'] | None\n # Host of the vLLM server to connect to.\n vllm_server_host: str | None = 0.0.0.0\n # Port of the vLLM server to connect to.\n vllm_server_port: int | None = 8000\n # Total timeout (in seconds) to wait for the vLLM server to respond.\n vllm_server_timeout: int | None\n # Regex for vLLM guided decoding.\n vllm_guided_decoding_regex: str | None\n\n # List of reward functions to load. Paths must be importable from current dir.\n reward_funcs: list[str] | None\n # List of reward weights for the reward functions.\n reward_weights: list[float] | None\n # Batch size for generation. Controls how many unique prompts are generated per step.\n # Should be num_generations * data_parallel_size for full DP utilization.\n generation_batch_size: int | None\n # Number of generations to sample.\n num_generations: int | None\n # Whether to log completions.\n log_completions: bool | None = False\n # Number of completions to print when log_completions is True.\n num_completions_to_print: int | None\n # Controls whether importance sampling ratios are computed at the `'token'` or\n # `'sequence'` level. For GSPO, use `sequence`, default is None which corresponds to\n # the original GRPO paper.\n importance_sampling_level: Literal['sequence', 'token'] | None\n\n # Whether to sync the reference model.\n sync_ref_model: bool | None = False\n # Mixup alpha for the reference model.\n ref_model_mixup_alpha: float | None = 0.9\n # Sync steps for the reference model.\n ref_model_sync_steps: int | None = 64\n # Whether to scale rewards by their standard deviation.\n scale_rewards: bool = True\n\n # Sampling temperature for the GRPO policy.\n temperature: float | None\n # Top-p sampling probability for the generation policy.\n top_p: float | None\n # Top-k sampling for the generation policy.\n top_k: int | None\n # Minimum probability for the generation policy.\n min_p: float | None\n # Penalty for tokens that appear in prompt and generated text.\n repetition_penalty: float | None\n # Additional generation parameters passed to vLLM SamplingParams. 
Useful for\n # stop_token_ids, seed, frequency_penalty, etc.\n generation_kwargs: dict[str, Any] | None\n # Additional kwargs for the chat template. E.g., {enable_thinking: false} for Qwen3.5\n # models.\n chat_template_kwargs: dict[str, Any] | None\n # Number of iterations per batch (μ) for GRPO.\n num_iterations: int | None\n # Epsilon value for clipping in the GRPO algorithm.\n epsilon: float | None\n # Upper-bound epsilon value for clipping in the GRPO algorithm.\n epsilon_high: float | None\n # Whether to use Liger loss for GRPO.\n use_liger_loss: bool | None\n # Loss formulation to use. Supported values: grpo, bnpo, dr_grpo.\n loss_type: str | None\n # Whether to exclude truncated completions from loss calculation.\n mask_truncated_completions: bool = False\n # Enable sleep mode for vLLM to offload VRAM when idle\n vllm_enable_sleep_mode: bool | None\n # Path to custom rollout function. Must be importable from current dir.\n rollout_func: str | None\n # Multi-objective reward aggregation strategy. 'sum_then_normalize' (GRPO default):\n # weights and sums rewards first, then normalizes. 'normalize_then_sum' (GDPO):\n # normalizes each reward independently, then sums.\n multi_objective_aggregation: Literal['sum_then_normalize', 'normalize_then_sum'] | None\n\n # Use the GRPODataProducer protocol for online data generation.\n use_data_producer: bool = False\n # Generate rollouts in a background thread while training on the previous rollout.\n async_prefetch: bool = False\n # Number of rollouts to prefetch ahead of training.\n prefetch_depth: int | None\n # Sync model weights to vLLM every N optimizer steps (async mode only).\n vllm_sync_interval: int | None\n # Score prompt groups incrementally instead of the full batch at once.\n streaming_partial_batch: bool | None\n # Minimum prompt groups to score per streaming chunk.\n streaming_min_groups: int | None\n # Apply IS correction for distribution mismatch between vLLM and training model.\n vllm_importance_sampling_correction: bool | None\n # IS mode: token_truncate, token_mask, sequence_truncate, or sequence_mask.\n vllm_importance_sampling_mode: Literal['token_truncate', 'token_mask', 'sequence_truncate', 'sequence_mask'] | None\n # Cap C for IS ratio clipping/masking.\n vllm_importance_sampling_cap: float | None\n # KL threshold for off-policy sequence masking (OPSM). None = disabled.\n off_policy_mask_threshold: float | None\n # Apply IS correction to KL divergence term.\n use_bias_correction_kl: bool | None\n\n # Number of persistent subprocess workers for parallel reward computation. Each worker\n # has its own main thread so signal.alarm() (used by math_verify) works correctly.\n # Work is sharded across workers by prompt groups. Only used with\n # use_data_producer=True and non-nn.Module reward functions.\n reward_num_workers: int = 1\n # [Experimental, disabled by default] Size of the replay buffer for storing high-\n # signal rollout groups. When > 0, groups with reward variance are cached and used to\n # replace zero-signal groups (where all rewards are identical). Set to 0 to disable.\n # Only used with use_data_producer=True.\n replay_buffer_size: int = 0\n # When True (default), recompute old_per_token_logps for replayed groups using the\n # current training model. This fixes the importance sampling mismatch that occurs when\n # replaying stale data. Only relevant when replay_buffer_size > 0.\n replay_recompute_logps: bool = True\n # Fraction of total training steps after which deferred re-rolling begins. 
Zero-signal\n # prompts (where all rewards in a group are identical) are buffered and re-injected\n # into later batches when the model is more likely to solve them. Set to 1.0 to\n # disable. Only used with use_data_producer=True.\n reroll_start_fraction: float = 1.0\n # Maximum number of prompt groups to replace with re-roll candidates per batch. Higher\n # values increase data utilization but reduce prompt diversity. Only used with\n # use_data_producer=True.\n reroll_max_groups: int = 1\n # When True, skip gradient computation for micro-batches where all advantages are zero\n # (no learning signal). This avoids the forward/backward pass entirely when no\n # learning signal is present. The step is logged with skipped_zero_adv_batches=1 for\n # monitoring.\n skip_zero_advantage_batches: bool = True\n # Sync LoRA adapter to vLLM via filesystem instead of merging + NCCL broadcast. Auto-\n # selects vllm_serve_lora serve module. Syncs only LoRA adapter weights vs full merged\n # model.\n vllm_lora_sync: bool = False\n\nvllm: VllmConfig | None\n # For VllmConfig:\n # Device to use for VLLM\n device: str | None = auto\n # Tensor parallel size for VLLM\n tensor_parallel_size: int | None\n # Data parallel size for VLLM\n data_parallel_size: int | None\n # GPU memory utilization for VLLM\n gpu_memory_utilization: float | None = 0.9\n # Data type for VLLM\n dtype: str | None = auto\n # Maximum length of the model context for VLLM\n max_model_len: int | None\n # Enable prefix caching for VLLM\n enable_prefix_caching: bool | None\n # Host for the vLLM server to start on\n host: str | None = 0.0.0.0\n # Port of the vLLM server to start on\n port: int | None = 8000\n\n # Enable reasoning for VLLM\n enable_reasoning: bool | None\n # Reasoning parser for VLLM\n reasoning_parser: str | None\n # Disable CUDA graph capture in vLLM. Required for models with causal_conv1d (e.g.,\n # Qwen3.5 hybrid linear attention).\n enforce_eager: bool | None\n # Python module for vLLM serve script. Set to 'axolotl.scripts.vllm_serve_lora' for\n # native LoRA support, or leave None for default TRL serve.\n serve_module: str | None\n # vLLM worker extension class for weight synchronization. 
Defaults to\n # 'trl.scripts.vllm_serve.WeightSyncWorkerExtension'.\n worker_extension_cls: str | None\n\n# Configuration for Energy-Based Fine-Tuning (EBFT)\nebft: EBFTConfig | None\n # For EBFTConfig:\n # Fractional layer depths for feature extraction (e.g., [0.25, 0.5, 0.75])\n feature_layers: list[float] = [0.25, 0.5, 0.75]\n # Embedding method: 'last_token', 'mean_pooling', 'completion_mean', or 'concat'\n embed_method: Literal['last_token', 'mean_pooling', 'completion_mean', 'concat'] = last_token\n # Apply SVD whitening to feature embeddings\n use_whitening: bool = False\n # Coefficient for alignment reward (cosine similarity with ground truth)\n alignment_coef: float = 1.0\n # Coefficient for diversity penalty (pairwise similarity between samples)\n diversity_coef: float = 1.0\n # Cross-entropy loss coefficient on ground-truth tokens\n ce_coef: float = 0.0\n # Set per-batch max_tokens based on ground-truth length\n adaptive_max_tokens: bool = True\n # Multiplier for ground-truth token count when computing adaptive max_tokens\n gt_length_multiplier: float = 1.5\n\n # EBFT mode: 'structured' (QA with vLLM) or 'strided' (unstructured text)\n mode: Literal['structured', 'strided'] = structured\n # Stride between anchor points (tokens)\n stride: int = 8\n # Context window size per block\n context_length: int = 8\n # Tokens to generate per block\n generate_max_len: int = 8\n # Independent rollouts per document\n n_samples_per_prompt: int = 4\n # Sampling temperature for strided generation\n temperature: float = 0.6\n # Top-p nucleus sampling threshold\n top_p: float = 1.0\n # RL policy gradient loss coefficient\n rl_coef: float = 1.0\n # Advantage estimator: 'rloo', 'group_norm', 'reinforce'\n advantage_estimator: Literal['rloo', 'group_norm', 'reinforce'] = rloo\n # Minimum tokens into completion before placing anchors. Skips anchors too close to\n # the prompt boundary where features are dominated by prompt context.\n min_completion_prefix: int = 0\n\nqat: QATConfig | None\n # For QATConfig:\n # Fake quantization layout to use for activation quantization.\n activation_dtype: TorchAOQuantDType | None\n # Fake quantization layout to use for weight quantization.\n weight_dtype: TorchAOQuantDType = TorchAOQuantDType.int8\n # Quantize embedding\n quantize_embedding: bool | None = False\n # The number of elements in each group for per-group fake quantization\n group_size: int | None = 32\n # The number of steps to apply fake quantization after\n fake_quant_after_n_steps: int | None\n\nquantization: PTQConfig | None\n # For PTQConfig:\n # Fake quantization layout to use for weight quantization.\n weight_dtype: TorchAOQuantDType = TorchAOQuantDType.int8\n # Fake quantization layout to use for activation quantization.\n activation_dtype: TorchAOQuantDType | None\n # Whether to quantize the embedding layer.\n quantize_embedding: bool | None\n # The number of elements in each group for per-group fake quantization\n group_size: int | None = 32\n\n# Reward modelling: `True` or `False`\nreward_model: bool | None\n\n# Configuration for dynamic checkpointing (trigger by file or signal). Set 'enabled:\n# true' to activate this feature.\ndynamic_checkpoint: DynamicCheckpointConfig | None\n # For DynamicCheckpointConfig:\n # Enable dynamic checkpoint triggering during training. Create a file\n # 'axolotl_checkpoint.save' in the configured `output_dir` to trigger.\n enabled: bool = False\n # Check for trigger file every N steps (reduces I/O overhead). 
Default: 10\n check_interval: int = 10\n # Custom trigger filename (optional). If not specified, defaults to\n # 'axolotl_checkpoint.save'. Specify a filename (not a full path) to override the\n # default.\n trigger_file_path: str = \n\n# Process reward modelling: `True` or `False`\nprocess_reward_model: bool | None\n# Coefficient to incentivize the reward model to output mean-zero rewards (proposed by\n# https://huggingface.co/papers/2312.09244, Eq. 2). Recommended value: `0.01`.\ncenter_rewards_coefficient: float | None\nnum_labels: int | None\n\n# Whether to perform weighting in DPO trainer\ndpo_use_weighting: bool | None\ndpo_label_smoothing: float | None\n# Precompute reference model log probabilities for DPO\nprecompute_ref_log_probs: bool | None\n\n# Whether to use Liger kernel for DPO loss.\ndpo_use_liger_kernel: bool | None\n\ndpo_padding_free: bool | None\n\n# List of DPO losses to use.\ndpo_loss_type: Annotated[list[str], MinLen(1)] | None\n\n# Weights for each DPO loss.\ndpo_loss_weights: Annotated[list[float], MinLen(1)] | None\n\n# A list of one or more datasets to finetune the model with\ndatasets: Annotated[list[SFTDataset | DPODataset | KTODataset | StepwiseSupervisedDataset | SyntheticDataset], MinLen(1)] | None\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. 
Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n # Key containing the reasoning trace (default: \"reasoning_content\").\n field_thinking: str | None\n # The key the chat template expects that indicates the reasoning trace.\n template_thinking_key: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. 
If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. This parameter is ignored for local datasets.\n revision: str | None\n\n # For DPODataset:\n path: str | None\n split: str | None\n type: UserDefinedDPOType | str | None\n # For UserDefinedDPOType:\n field_system: str | None\n field_prompt: str | None\n field_chosen: str | None\n field_rejected: str | None\n prompt_format: str | None\n chosen_format: str | None\n rejected_format: str | None\n data_files: list[str] | None\n revision: str | None\n field_messages: str | None\n\n # For KTODataset:\n path: str | None\n split: str | None\n type: UserDefinedKTOType | str | None\n # For UserDefinedKTOType:\n field_system: str | None\n field_prompt: str | None\n field_completion: str | None\n field_label: bool | None\n prompt_format: str | None\n completion_format: str | None\n data_files: list[str] | None\n trust_remote_code: bool | None = False\n revision: str | None\n\n # For StepwiseSupervisedDataset:\n path: str | None\n split: str | None\n data_files: list[str] | None\n revision: str | None\n step_separator: str | None\n max_completion_length: int | None\n train_on_last_step_only: bool | None\n\n # For SyntheticDataset:\n path: Literal['synthetic'] = synthetic\n type: Literal['_synthetic'] = _synthetic\n # Number of rows to generate\n length: int = 1000\n # Sequence length per row (defaults to sequence_len from config)\n sequence_length: int | None\n # Minimum token ID for generation\n min_input_id: int = 100\n # Maximum token ID for generation (defaults to tokenizer vocab_size)\n max_input_id: int | None\n # Random seed for reproducibility\n seed: int | None\n\n# A list of one or more datasets to eval the model with. You can use either\n# test_datasets, or val_set_size, but not both.\ntest_datasets: Annotated[list[SFTDataset | DPODataset | KTODataset | StepwiseSupervisedDataset | SyntheticDataset], MinLen(1)] | None\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 
'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n # Key containing the reasoning trace (default: \"reasoning_content\").\n field_thinking: str | None\n # The key the chat template expects that indicates the reasoning trace.\n template_thinking_key: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. 
The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. This parameter is ignored for local datasets.\n revision: str | None\n\n # For DPODataset:\n path: str | None\n split: str | None\n type: UserDefinedDPOType | str | None\n # For UserDefinedDPOType:\n field_system: str | None\n field_prompt: str | None\n field_chosen: str | None\n field_rejected: str | None\n prompt_format: str | None\n chosen_format: str | None\n rejected_format: str | None\n data_files: list[str] | None\n revision: str | None\n field_messages: str | None\n\n # For KTODataset:\n path: str | None\n split: str | None\n type: UserDefinedKTOType | str | None\n # For UserDefinedKTOType:\n field_system: str | None\n field_prompt: str | None\n field_completion: str | None\n field_label: bool | None\n prompt_format: str | None\n completion_format: str | None\n data_files: list[str] | None\n trust_remote_code: bool | None = False\n revision: str | None\n\n # For StepwiseSupervisedDataset:\n path: str | None\n split: str | None\n data_files: list[str] | None\n revision: str | None\n step_separator: str | None\n max_completion_length: int | None\n train_on_last_step_only: bool | None\n\n # For SyntheticDataset:\n path: Literal['synthetic'] = synthetic\n type: Literal['_synthetic'] = _synthetic\n # Number of rows to generate\n length: int = 1000\n # Sequence length per row (defaults to sequence_len from config)\n sequence_length: int | None\n # Minimum token ID for generation\n min_input_id: int = 100\n # Maximum token ID for generation (defaults to tokenizer vocab_size)\n max_input_id: int | None\n # Random seed for reproducibility\n seed: int | None\n\n# If false, the datasets will not be shuffled and will keep their original order in\n# `datasets`. 
The same applies to the `test_datasets` option and the\n# `pretraining_dataset` option. Default is true.\nshuffle_merged_datasets: bool | None = True\n# If true, each dataset in `datasets` will be shuffled before merging. This allows\n# curriculum learning strategies to be applied at the dataset level. Default is false.\nshuffle_before_merging_datasets: bool | None = False\n# Axolotl attempts to save the dataset as an arrow file after packing the data together\n# so subsequent training attempts load faster. Relative path.\ndataset_prepared_path: str | None\n# Num shards for whole dataset\ndataset_shard_num: int | None\n# Index of shard to use for whole dataset\ndataset_shard_idx: int | None\nskip_prepare_dataset: bool | None = False\n# Number of shards to save the prepared dataset\nnum_dataset_shards_to_save: int | None\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenizing\npretraining_dataset: Annotated[list[PretrainingDataset | SFTDataset], MinLen(1)] | None\n # For PretrainingDataset:\n name: str | None\n path: str | None\n split: str | None = train\n text_column: str | None = text\n type: str | None = pretrain\n trust_remote_code: bool | None = False\n data_files: str | None\n skip: int | None\n\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. 
Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n # Key containing the reasoning trace (default: \"reasoning_content\").\n field_thinking: str | None\n # The key the chat template expects that indicates the reasoning trace.\n template_thinking_key: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. 
If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. This parameter is ignored for local datasets.\n revision: str | None\n\n# The maximum number of processes to use while preprocessing your input dataset. This\n# defaults to `os.cpu_count()` if not set. For Runpod VMs, it will default to number of\n# vCPUs via RUNPOD_CPU_COUNT.\ndataset_processes: int | None\n# The maximum number of processes to use while preprocessing your input dataset. This\n# defaults to `os.cpu_count()` if not set. For Runpod VMs, it will default to number of\n# vCPUs via RUNPOD_CPU_COUNT.\ndataset_num_proc: int | None\n\n# Deduplicates datasets and test_datasets with identical entries\ndataset_exact_deduplication: bool | None\n# Keep dataset in memory while preprocessing. Only needed if cached dataset is taking\n# too much storage\ndataset_keep_in_memory: bool | None\ndataloader_pin_memory: bool | None\ndataloader_num_workers: int | None\ndataloader_prefetch_factor: int | None\ndataloader_drop_last: bool | None\n\naccelerator_config: dict[str, Any] | None\n\nremove_unused_columns: bool | None\n\n# Push prepared dataset to hub - repo_org/repo_name\npush_dataset_to_hub: str | None\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private\n# datasets. Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: bool | None\n\ndevice: Any | None\n# Passed through to transformers when loading the model when launched without\n# accelerate. Use `sequential` when training w/ model parallelism to limit memory\ndevice_map: Any | None\nworld_size: int | None\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank: int | None\nddp: bool | None\n\n# Seed for reproducibility\nseed: int | None\n# Advanced DDP Arguments - timeout\nddp_timeout: int | None\n# Advanced DDP Arguments - bucket cap in MB\nddp_bucket_cap_mb: int | None\n# Advanced DDP Arguments - broadcast buffers\nddp_broadcast_buffers: bool | None\nddp_find_unused_parameters: bool | None\n\n# Whether to run causal language model evaluation for metrics in\n# `eval_causal_lm_metrics`\ndo_causal_lm_eval: bool | None\n# HF evaluate metrics used during evaluation. Default is ['sacrebleu', 'comet', 'ter',\n# 'chrf', 'perplexity']\neval_causal_lm_metrics: list[str] | None\ndo_bench_eval: bool | None\nbench_dataset: str | None\nbench_split: str | None\nmetric_for_best_model: str | None\ngreater_is_better: bool | None\n\n# High loss value, indicating the learning has broken down (a good estimate is ~2 times\n# the loss at the start of training)\nloss_watchdog_threshold: float | None\n# Number of high-loss steps in a row before the trainer aborts (default: 3)\nloss_watchdog_patience: int | None\n\n# Run garbage collection every `gc_steps` steps. -1 will run on epoch end and before\n# evaluations. Default is 0 (disabled).\ngc_steps: int | None\n\n# Use CUDA bf16. bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection.\n# require >=ampere\nbf16: Literal['auto'] | bool | None = auto\n# Use CUDA fp16\nfp16: bool | None\n# Enable FP8 mixed precision training using TorchAO. 
Best used in combination with\n# torch.compile.\nfp8: bool | None\n# Enable FSDP float8 all-gather optimization for FP8 training. Can improve training\n# speed by 10-15% when FSDP is enabled.\nfp8_enable_fsdp_float8_all_gather: bool | None\n# No AMP (automatic mixed precision) - require >=ampere\nbfloat16: bool | None\n# No AMP (automatic mixed precision)\nfloat16: bool | None\n# bool to use CUDA tf32 or 'auto' for automatic detection - require >=ampere\ntf32: Literal['auto'] | bool | None = auto\nfloat32: bool | None\n\n# Whether to use gradient checkpointing. Available options are: true, false, 'offload',\n# 'offload_disk'.\n# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: Literal['offload', 'offload_disk'] | bool | None = False\n# Additional kwargs to pass to the trainer for gradient checkpointing\ngradient_checkpointing_kwargs: dict[str, Any] | None\n# Whether to offload activations. Available options are: true, false, 'legacy', 'disk'.\nactivation_offloading: Literal['legacy', 'disk'] | bool | None = False\n# Offload model layer parameters to CPU during forward, prefetch back during backward.\nlayer_offloading: bool | None = False\n\n# Freeze multimodal encoder parameters (vision, audio, etc.) for text-only training of\n# multimodal models. When True, parameters belonging to vision towers, audio towers,\n# multimodal projectors, and similar non-language modules are frozen\n# (requires_grad=False). This allows DDP training without\n# ddp_find_unused_parameters=True.\nfreeze_mm_modules: bool | None\n\n# List of regex patterns for parameter names to keep unfrozen. All other parameters will\n# be frozen via requires_grad=False. Note: range-based patterns (e.g.\n# embed_tokens.weight$[:32000]) use gradient zeroing rather than a true freeze, so\n# weight decay will still apply to the frozen portion and optimizer states are allocated\n# for the full parameter.\nunfrozen_parameters: list[str] | None\n\n# The maximum length of an input to train with; this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: int = 512\n# What to do when a tokenized row exceeds sequence_len. 'drop' removes the row;\n# 'truncate' slices tensors to sequence_len; 'raise' raises a ValueError. Defaults to\n# 'drop' for backward compatibility.\nexcess_length_strategy: Literal['drop', 'truncate', 'raise'] | None\n# The maximum length of an input for evaluation. If not specified, defaults to\n# sequence_len\neval_sequence_len: int | None\nmin_sample_len: int | None\n# maximum prompt length for RL training\nmax_prompt_len: int | None\n# Use efficient multi-packing with block diagonal attention and per sequence\n# position_ids. Recommended: set to 'true'\nsample_packing: bool | None\n# The number of samples packed at a time. Increasing the following values helps with\n# packing, but usually only slightly (<1%).\nsample_packing_group_size: int | None = 100000\n# The number of samples which can be packed into one sequence. Increase if using a large\n# sequence_len with many short samples.\nsample_packing_bin_size: int | None = 200\n# Whether to pack samples sequentially\nsample_packing_sequentially: bool | None\n# The multiprocessing start method to use for packing. Should be 'fork', 'spawn' or\n# 'forkserver'\nsample_packing_mp_start_method: str | None\n# Set to 'false' if getting errors during eval with sample_packing on\neval_sample_packing: bool | None\n# Pad inputs so each step uses constant sized buffers. 
This will reduce memory\n# fragmentation and may prevent OOMs, by re-using memory more efficiently. Defaults to\n# True if `sample_packing` enabled\npad_to_sequence_len: bool | None\n# Pad each batch to a multiple of this value.\npad_to_multiple_of: int | None\n# Whether to use sequential sampling for curriculum learning\ncurriculum_sampling: bool | None\nmultipack_real_batches: bool | None\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening: Literal['auto'] | bool | None\n\nuse_pose: bool | None\npose_split_on_token_ids: list[int] | None\npose_max_context_len: int | None\npose_num_chunks: int | None\n\npretrain_multipack_buffer_size: int | None\n# whether to prevent cross attention for packed sequences during pretraining\npretrain_multipack_attn: bool | None = True\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation: bool | None\n\n# Use streaming mode for loading datasets\nstreaming: bool | None\n# Buffer size for multipack streaming datasets\nstreaming_multipack_buffer_size: int | None = 10000\n\n# Whether to use xformers attention patch https://github.com/facebookresearch/xformers\nxformers_attention: bool | None\n# Whether to use scaled-dot-product attention https://pytorch.org/docs/stable/generated/\n# torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention: bool | None\n# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention: bool | None\nflex_attention: bool | None\nflex_attn_compile_kwargs: dict[str, Any] | None\n# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention\nflash_attention: bool | None\n# Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_cross_entropy: bool | None\n# Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_rms_norm: bool | None\n# Whether to fuse part of the MLP into a single operation\nflash_attn_fuse_mlp: bool | None\n# Whether to use bettertransformers\nflash_optimum: bool | None\n# Whether to use SageAttention https://github.com/thu-ml/SageAttention\nsage_attention: bool | None\n\neager_attention: bool | None\n\n# Specify a custom attention implementation, used mostly for kernels.\nattn_implementation: str | None\n\n# Use hybrid attention for Gemma 4: flash_attention_2 for sliding window layers and sdpa\n# for global (full_attention) layers. Global layers have head_dim=512 which exceeds\n# flash attention's supported size.\ngemma4_hybrid_attn_impl: bool | None\n\n# Which experts implementation to use for MoE models,\nexperts_implementation: str | None\n\n# Quantize MoE expert weights on load to reduce VRAM. Requires adapter (lora/qlora) with\n# load_in_4bit or load_in_8bit. Requires CUDA (not compatible with ROCm or other\n# backends). Note: total parameter count may be reported incorrectly when enabled\n# (trainable param count is correct).\nquantize_moe_experts: bool = False\n\n# Whether to use Scaled Softmax (SSMax) attention. Ref: https://arxiv.org/abs/2501.19399\nscaling_softmax: bool | None\n# Scaling factor for SSMax attention. Default is 0.43\nscaling_softmax_factor: float | None\n# Bias for SSMax attention. Default is 0.0. Note: The paper recommends bias=0 for better\n# length generalization.\nscaling_softmax_bias: float | None\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. 
See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_mlp_kernel: bool | None\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_qkv_kernel: bool | None\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_o_kernel: bool | None\n# Apply custom LoRA autograd function for embedding layers. See:\n# https://docs.axolotl.ai/docs/lora_optims.html\nlora_embedding_kernel: bool | None\n\n# Whether to use chunked cross entropy loss for memory efficiency\nchunked_cross_entropy: bool | None\n# Number of chunks to use for chunked cross entropy loss\nchunked_cross_entropy_num_chunks: int | None\n# Enable Entropy-Aware Focal Training loss (EAFT)\nuse_eaft: bool | None\n# Exponent for entropy weighting in EAFT (default: 1.0)\neaft_alpha: float | None = 1.0\n# Number of top logits for entropy approximation (default: 20)\neaft_k: int | None = 20\n\n# Whether to use ALST tiled mlp for memory efficient long context\ntiled_mlp: bool | None\n\n# Number of shards to use for ALST tiled mlp. If unset, it will be set based on\n# seqlen/hidden_size\ntiled_mlp_num_shards: int | None\n\n# Whether to use original mlp for ALST tiled mlp. Otherwise uses a generic MLP based on\n# llama.\ntiled_mlp_use_original_mlp: bool | None = True\n\nllama4_linearized_experts: bool | None\n\n# Deepspeed config path. e.g., deepspeed_configs/zero3.json\ndeepspeed: str | dict[str, Any] | None\n# Whether to use deepcompile for faster training with deepspeed\ndeepcompile: bool | None\n# FSDP configuration\nfsdp: list[str] | None\n\n# FSDP configuration options\nfsdp_config: FSDPConfig | None\n # For FSDPConfig:\n # FSDP version\n fsdp_version: int | None\n # Enable activation checkpointing to reduce memory usage during forward passes\n activation_checkpointing: bool | None\n # Offload parameters to CPU to reduce GPU memory usage\n offload_params: bool | None\n # Synchronize module states across all processes\n sync_module_states: bool | None\n # Enable CPU RAM efficient loading to reduce memory usage during model loading\n cpu_ram_efficient_loading: bool | None\n # Disabling this enables swap memory usage for resource-constrained setups when\n # offload_params is enabled.\n cpu_offload_pin_memory: bool | None\n # Use original parameters instead of flattened parameters\n use_orig_params: bool | None\n\n # Type of state dict to use for saving/loading checkpoints\n state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None\n # Final state dict type to use after training completion\n final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None\n\n # Policy for automatically wrapping modules with FSDP\n auto_wrap_policy: Literal['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP'] | None\n # Class name of transformer layers to wrap (e.g., 'LlamaDecoderLayer')\n transformer_layer_cls_to_wrap: str | None\n\n # Reshard parameters after forward pass to save memory\n reshard_after_forward: bool | None\n # Mixed precision policy for FSDP (e.g., 'fp16', 'bf16')\n mixed_precision_policy: str | None\n\n# FSDP version\nfsdp_version: int | None\nfsdp_final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None\n\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 
0 for\n# no eval.\nval_set_size: float | None = 0.0\n\n# Number of devices to shard across. If not set, will use all available devices.\ndp_shard_size: int | None\n# Number of devices to replicate across.\ndp_replicate_size: int | None\n# Deprecated: use `context_parallel_size` instead\nsequence_parallel_degree: int | None\n# Set to a divisor of the number of GPUs available to split sequences into chunks of\n# equal size. Use in long context training to prevent OOM when sequences cannot fit into\n# a single GPU's VRAM. E.g., if 4 GPUs are available, set this value to 2 to split each\n# sequence into two equal-sized subsequences, or set to 4 to split into four equal-sized\n# subsequences. See https://docs.axolotl.ai/docs/sequence_parallelism.html for more\n# details.\ncontext_parallel_size: int | None\n# Optional; strides across the key dimension. Larger values use more memory but should\n# make training faster. Must evenly divide the number of KV heads in your model.\nheads_k_stride: int | None\n# One of 'varlen_llama3', 'batch_ring', 'batch_zigzag', 'batch_stripe'. Defaults to\n# 'varlen_llama3' in the sample packing case, and 'batch_ring' in the non-sample packing\n# case.\nring_attn_func: RingAttnFunc | None\n# Number of tensor parallel processes in TP group. Only supported with DeepSpeed AutoTP.\ntensor_parallel_size: int | None\n\n# Add or change special tokens. If you add tokens here, you don't need to add them to\n# the `tokens` list.\nspecial_tokens: SpecialTokensConfig | None\n # For SpecialTokensConfig:\n bos_token: str | None\n eos_token: str | None\n pad_token: str | None\n unk_token: str | None\n additional_special_tokens: list[str] | None\n\n# Add extra tokens to the tokenizer\ntokens: list[str] | None\n# Mapping token_id to new_token_string to override reserved added_tokens in the\n# tokenizer. Only works for tokens that are not part of the base vocab (aka are\n# added_tokens). Can be checked if they exist in tokenizer.json added_tokens.\nadded_tokens_overrides: dict[int, str] | None\n\n# Whether to use torch.compile and which backend to use.\ntorch_compile: Literal['auto'] | bool | None\n# Backend to use for torch.compile\ntorch_compile_backend: str | None\ntorch_compile_mode: Literal['default', 'reduce-overhead', 'max-autotune'] | None\n\n# Maximum number of iterations to train for. It precedes num_epochs which means that if\n# both are set, num_epochs will not be guaranteed. e.g., when 1 epoch is 1000 steps =>\n# `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps: int | None\n# Number of warmup steps. Cannot use with warmup_ratio\nwarmup_steps: int | None\n# Warmup ratio. Cannot use with warmup_steps\nwarmup_ratio: float | None\n# Leave empty to eval at each epoch, integer for every N steps. float for fraction of\n# total steps\neval_steps: int | float | None\n# Number of times per epoch to run evals, mutually exclusive with eval_steps\nevals_per_epoch: int | None\n# Set to `no` to skip evaluation, `epoch` at end of each epoch, leave empty to infer\n# from `eval_steps`\neval_strategy: str | None\n\n# Leave empty to save at each epoch, integer for every N steps. 
float for fraction of\n# total steps\nsave_steps: int | float | None\n# Number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsaves_per_epoch: int | None\n# Set to `no` to skip checkpoint saves, `epoch` at end of each epoch, `best` when better\n# result is achieved, leave empty to infer from `save_steps`\nsave_strategy: str | None\n# Checkpoints saved at a time\nsave_total_limit: int | None\n# Whether to checkpoint a model after the first step of training. Defaults to False.\nsave_first_step: bool | None\n\n# Logging frequency\nlogging_steps: int | None\n# Stop training after this many evaluation losses have increased in a row. https://huggi\n# ngface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppin\n# gCallback\nearly_stopping_patience: int | None\nload_best_model_at_end: bool | None = False\n# Save only the model weights, skipping the optimizer. Using this means you can't resume\n# from checkpoints.\nsave_only_model: bool | None = False\n# Use tensorboard for logging\nuse_tensorboard: bool | None\n# Enable the pytorch profiler to capture the first N steps of training to the\n# output_dir. see https://pytorch.org/blog/understanding-gpu-memory-1/ for more\n# information. Snapshots can be visualized @ https://pytorch.org/memory_viz\nprofiler_steps: int | None\n# Which step to start the profiler at. Useful for only capturing a few steps mid-run.\nprofiler_steps_start: int | None = 0\n# bool of whether to report tokens per second at the end of training. This is not\n# supported with pre-training datasets.\ninclude_tokens_per_second: bool | None\n# bool of whether to report tokens per second per-gpu during training by measuring\n# throughput of non-padding tokens.\ninclude_tkps: bool | None = True\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to\n# add noise to embeddings. Currently only supported on Llama and Mistral\nneftune_noise_alpha: float | None\n\n# Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to\n# `beta` in `ORPOConfig` due to trl mapping.\norpo_alpha: float | None\n# Target reward margin for the SimPO loss\nsimpo_gamma: float | None\n# Weight of the BC regularizer\ncpo_alpha: float | None\n\n# Factor for desirable loss term in KTO loss\nkto_desirable_weight: float | None\n# Factor for undesirable loss term in KTO loss\nkto_undesirable_weight: float | None\n# The beta parameter for the RL training\nrl_beta: float | None\n\n# Defines the max memory usage per gpu on the system. Passed through to transformers\n# when loading the model.\nmax_memory: dict[int | Literal['cpu', 'disk'], int | str] | None\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in\n# gigabytes); default: unset\ngpu_memory_limit: int | str | None\n# Whether to use low_cpu_mem_usage\nlow_cpu_mem_usage: bool | None\n\n# The name of the chat template to use for training, following values are supported:\n# tokenizer_default: Uses the chat template that is available in the\n# tokenizer_config.json. If the chat template is not available in the tokenizer, it will\n# raise an error. This is the default value.\n# alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n# are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n# tokenizer_default_fallback_*: where * is the name of the chat template to fallback to.\n# E.g. tokenizer_default_fallback_chatml. 
This is useful when the chat template is not\n# available in the tokenizer. jinja: Uses a custom jinja template for the chat template.\n# The custom jinja template should be provided in the chat_template_jinja field. The\n# selected chat template will be saved to the tokenizer_config.json for easier\n# inferencing\nchat_template: ChatTemplate | Annotated[str, StringConstraints(pattern='^tokenizer_default_fallback_')] | None\n# Custom jinja template or path to jinja file for chat template. This will be only used\n# if chat_template is set to `jinja` or `null` (in which case chat_template is\n# automatically set to `jinja`). Default is null.\nchat_template_jinja: str | None\n# Additional kwargs to pass to the chat template. This is useful for customizing the\n# chat template. For example, you can pass `thinking=False` to add a generation prompt\n# to the chat template.\nchat_template_kwargs: dict[str, Any] | None\n# Custom EOT (End-of-Turn) tokens to mask/unmask during training. These tokens mark the\n# boundaries between conversation turns. For example: ['/INST', '</s>',\n# '[/SYSTEM_PROMPT]']. If not specified, defaults to just the model's eos_token. This is\n# useful for templates that use multiple delimiter tokens.\neot_tokens: list[str] | None\n# Changes the default system message. Currently only supports chatml.\ndefault_system_message: str | None\n\n# Token index or indices to adjust embedding weights to the mean of the other tokens.\n# This is useful when the model has untrained embeddings.\nfix_untrained_tokens: int | list[int] | None\n\nis_preprocess: bool | None\npreprocess_iterable: bool | None\n\n# Total number of tokens - internal use\ntotal_num_tokens: int | None\ntotal_supervised_tokens: int | None\n# You can set these packing optimizations AFTER starting a training at least once. The\n# trainer will provide recommended values for these values.\nsample_packing_eff_est: float | None\naxolotl_config_path: str | None\n\n# Internal use only - Used to identify which the model is based on\nis_falcon_derived_model: bool | None\n# Internal use only - Used to identify which the model is based on\nis_llama_derived_model: bool | None\n# Internal use only - Used to identify which the model is based on. Please note that if\n# you set this to true, `padding_side` will be set to 'left' by default\nis_mistral_derived_model: bool | None\n# Internal use only - Used to identify which the model is based on\nis_qwen_derived_model: bool | None\n\n# Add plugins to extend the pipeline. See `src/axolotl/integrations` for the available\n# plugins or doc below for more details.\n# https://docs.axolotl.ai/docs/custom_integrations.html\nplugins: list[str] | None\n# Enable sample generation during training for monitoring\ngenerate_samples: bool | None = False\n# Number of samples to generate at each interval\nnum_generation_samples: int | None = 3\n# Maximum new tokens to generate per sample\ngeneration_max_new_tokens: int | None = 50\n# Temperature for sample generation (0.0 = greedy)\ngeneration_temperature: float | None = 0.7\n# Nucleus sampling parameter for generation\ngeneration_top_p: float | None\n# Top-k sampling parameter for generation\ngeneration_top_k: int | None\n# Ratio of input to use as prompt (0.0-1.0)\ngeneration_prompt_ratio: float | None = 0.5\n# Whether to use sampling (vs greedy decoding)\ngeneration_do_sample: bool | None = True\n\n# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files. 
This\n# can also be a relative path to a model on disk\nbase_model: str (required)\n# If the base_model repo on hf hub doesn't include configuration .json files, you can\n# set that here, or leave this empty to default to base_model\nbase_model_config: str | None\n# transformers config class (e.g., 'LlamaConfig', 'MistralConfig'). Defaults to\n# AutoConfig.\ncls_model_config: str | None\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one defined in the base model\ntokenizer_config: str | None\n# use_fast option for tokenizer loading from_pretrained, defaults to True\ntokenizer_use_fast: bool | None\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy: bool | None\n# Whether to use mistral-common tokenizer. If set to True, it will use the mistral-\n# common tokenizer.\ntokenizer_use_mistral_common: bool | None\n# Corresponding tokenizer for the model. AutoTokenizer is a good choice\ntokenizer_type: str | None\n# transformers processor class\nprocessor_type: str | None\n# kwargs forwarded to the processor's from_pretrained(), overriding processor config\n# (e.g. image_seq_length, min_pixels, etc.).\nprocessor_kwargs: dict[str, Any] | None\n# Whether to save jinja files for tokenizer, transformers default is True\ntokenizer_save_jinja_files: bool | None = True\n# Trust remote code for untrusted source\ntrust_remote_code: bool | None\n\n# Don't move the model to the device before sharding. Set to `false` to revert to legacy\n# behavior.\nexperimental_skip_move_to_device: bool | None = True\n\n# Use custom kernels, e.g. MegaBlocks.\nuse_kernels: bool | None\n\n# Model loading quantization config\nmodel_quantization_config: Literal['Mxfp4Config', 'FineGrainedFP8Config'] | None\n# kwargs for model quantization config\nmodel_quantization_config_kwargs: dict[str, Any] | None\n\n# Where to save the full-finetuned model to\noutput_dir: str = ./model-out\n# push checkpoints to hub\nhub_model_id: str | None\n# how to push checkpoints to hub\nhub_strategy: str | None\n# branch/revision to push to on hub (default: main)\nhub_revision: str | None\n# Whether to save the model using safetensors format. Defaults to True.\nsave_safetensors: bool | None = True\n\n# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer\nload_in_8bit: bool | None = False\n# Use bitsandbytes 4 bit\nload_in_4bit: bool | None = False\n\n# If you want to use 'lora', 'qlora', or 'llama-adapter', or leave blank to train all\n# parameters in original model\nadapter: Literal['lora', 'qlora', 'llama-adapter'] | None\n# If you already have a lora model trained that you want to load, put that here. This\n# means after training, if you want to test the model, you should set this to the value\n# of `output_dir`. Note that if you merge an adapter to the base model, a new\n# subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir: str | None\nlora_r: int | None\nlora_alpha: int | None\nlora_fan_in_fan_out: bool | None\nlora_target_modules: str | list[str] | None\nlora_target_parameters: str | list[str] | None\n# If true, will target all linear modules\nlora_target_linear: bool | None\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules\n# because they need to know the new tokens. For LLaMA and Mistral, you need to save\n# `embed_tokens` and `lm_head`. It may vary for other models. 
`embed_tokens` converts\n# tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\nlora_modules_to_save: list[str] | None\nlora_dropout: float | None = 0.0\n# The layer indices to transform, otherwise, apply to all layers\npeft_layers_to_transform: list[int] | None\npeft_layers_pattern: list[str] | None\n\npeft: PeftConfig | None\n # For PeftConfig:\n # Configuration options for loftq initialization for LoRA\n loftq_config: LoftQConfig | None\n # For LoftQConfig:\n # typically 4 bits\n loftq_bits: int = 4\n\n# Whether to use DoRA.\npeft_use_dora: bool | None\n# Whether to use RSLoRA.\npeft_use_rslora: bool | None\n# List of layer indices to replicate.\npeft_layer_replication: list[tuple[int, int]] | None\n# How to initialize LoRA weights. Default to True which is MS original implementation.\npeft_init_lora_weights: bool | str | None\n# A list of token indices to fine-tune on the `embed_tokens` layer. Otherwise, a dict\n# mapping an embedding layer name to its trainable token indices. See\n# https://huggingface.co/docs/peft/v0.17.0/en/developer_guides/lora#efficiently-train-\n# tokens-alongside-lora\npeft_trainable_token_indices: list[int] | dict[str, list[int]] | None\n# Whether to tie adapter weights for tied model weights. See\n# https://github.com/huggingface/peft/issues/2864\npeft_ensure_weight_tying: bool | None\n# Whether to upcast the LoRA adapter to fp32. This is enabled by default in PEFT.\npeft_autocast_adapter_dtype: bool | None\n\n# load qlora model in sharded format for FSDP using answer.ai technique.\nqlora_sharded_model_loading: bool | None = False\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it\n# takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge\nlora_on_cpu: bool | None\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: bool | None\n# optional overrides to the bnb 4bit quantization configuration\nbnb_config_kwargs: dict[str, Any] | None\n\n# loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.\nloraplus_lr_ratio: float | None\n# loraplus learning rate for lora embedding layers. Default value is 1e-6.\nloraplus_lr_embedding: float | None = 1e-06\n\nmerge_lora: bool | None\n# Method to use for LoRA merging. 'memory_efficient' (default) processes shards\n# individually to reduce memory usage, 'legacy' loads the full model into memory.\nmerge_method: Literal['legacy', 'memory_efficient'] | None = memory_efficient\n\n# Whether to use ReLoRA. Use with jagged_restart_*steps options.\nrelora: bool | None\n# threshold for optimizer magnitude when pruning\nrelora_prune_ratio: float | None\n# True to perform lora weight merges on cpu during restarts, for modest gpu memory\n# savings\nrelora_cpu_offload: bool | None\n\n# how often to reset for jagged restarts\njagged_restart_steps: int | None\n# how many warmup steps to take after reset for jagged restarts\njagged_restart_warmup_steps: int | None\n# how many anneal steps to take before reset for jagged restarts\njagged_restart_anneal_steps: int | None\n\n# If greater than 1, backpropagation will be skipped and the gradients will be\n# accumulated for the given number of steps.\ngradient_accumulation_steps: int | None = 1\n# The number of samples to include in each batch. This is the number of samples sent to\n# each GPU. 
Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: int | None = 1\n# Total batch size, we do not recommend setting this manually\nbatch_size: int | None\n# per gpu micro batch size for evals, defaults to value of micro_batch_size\neval_batch_size: int | None\n\n# whether to find batch size that fits in memory. Passed to underlying transformers\n# Trainer\nauto_find_batch_size: bool | None\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: bool | None = False\n# Group similarly sized data to minimize padding. May be slower to start, as it must\n# download and sort the entire dataset. Note that training loss may have an oscillating\n# pattern with this enabled.\ngroup_by_length: bool | None\n\nlearning_rate: str | float (required)\nembedding_lr: float | None\nembedding_lr_scale: float | None\n# Specify weight decay\nweight_decay: float | None = 0.0\n# Specify optimizer\noptimizer: OptimizerNames | CustomSupportedOptimizers | None = OptimizerNames.ADAMW_TORCH_FUSED\n# Dictionary of arguments to pass to the optimizer\noptim_args: str | dict[str, Any] | None\n# The target modules to optimize, i.e. the module names that you would like to train,\n# right now this is used only for GaLore algorithm\noptim_target_modules: list[str] | Literal['all_linear'] | None\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path: str | None\nlr_scheduler: SchedulerType | Literal['one_cycle'] | Literal['rex'] | None = SchedulerType.COSINE\n# Specify a scheduler and kwargs to use with the optimizer\nlr_scheduler_kwargs: dict[str, Any] | None\nlr_quadratic_warmup: bool | None\n# decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of\n# peak lr\ncosine_min_lr_ratio: float | None\n# freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means\n# start cosine_min_lr at 80% of training step\ncosine_constant_lr_ratio: float | None\n# Learning rate div factor\nlr_div_factor: float | None\n\nlr_groups: list[LrGroup] | None\n # For LrGroup:\n name: str (required)\n modules: list[str] (required)\n lr: float (required)\n\n# adamw hyperparams\nadam_epsilon: float | None\n# only used for CAME Optimizer\nadam_epsilon2: float | None\n# adamw hyperparams\nadam_beta1: float | None\n# adamw hyperparams\nadam_beta2: float | None\n# only used for CAME Optimizer\nadam_beta3: float | None\n\n# Dion Optimizer learning rate\ndion_lr: float | None\n# Dion Optimizer momentum\ndion_momentum: float | None\n# Dion Optimizer: r/d fraction for low-rank approximation. Used to compute the low-rank\n# dimension.\ndion_rank_fraction: float | None = 1.0\n# Dion Optimizer: Round up the low-rank dimension to a multiple of this number. 
This may\n# be useful to ensure even sharding.\ndion_rank_multiple_of: int | None = 1\n\n# Gradient clipping max norm\nmax_grad_norm: float | None\nnum_epochs: float = 1.0\n\nuse_wandb: bool | None\n# Set the name of your wandb run\nwandb_name: str | None\n# Set the ID of your wandb run\nwandb_run_id: str | None\n# \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn\n# off wandb\nwandb_mode: str | None\n# Your wandb project name\nwandb_project: str | None\n# A wandb Team name if using a Team\nwandb_entity: str | None\nwandb_watch: str | None\n# \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only\n# at the end of training\nwandb_log_model: str | None\n\nuse_mlflow: bool | None\n# URI to mlflow\nmlflow_tracking_uri: str | None\n# Your experiment name\nmlflow_experiment_name: str | None\n# Your run name\nmlflow_run_name: str | None\n# set to true to copy each saved checkpoint on each save to mlflow artifact registry\nhf_mlflow_log_artifacts: bool | None\n\n# Enable or disable Comet integration.\nuse_comet: bool | None\n# API key for Comet. Recommended to set via `comet login`.\ncomet_api_key: str | None\n# Workspace name in Comet. Defaults to the user's default workspace.\ncomet_workspace: str | None\n# Project name in Comet. Defaults to Uncategorized.\ncomet_project_name: str | None\n# Identifier for the experiment. Used to append data to an existing experiment or\n# control the key of new experiments. Default to a random key.\ncomet_experiment_key: str | None\n# Create a new experiment (\"create\") or log to an existing one (\"get\"). Default\n# (\"get_or_create\") auto-selects based on configuration.\ncomet_mode: str | None\n# Set to True to log data to Comet server, or False for offline storage. Default is\n# True.\ncomet_online: bool | None\n# Dictionary for additional configuration settings, see the doc for more details.\ncomet_experiment_config: dict[str, Any] | None\n\nuse_trackio: bool | None\n# Your trackio project name\ntrackio_project_name: str | None\n# Set the name of your trackio run\ntrackio_run_name: str | None\n# Hugging Face Space ID to sync dashboard to (optional, runs locally if not provided)\ntrackio_space_id: str | None\n\n# Enable OpenTelemetry metrics collection and Prometheus export\nuse_otel_metrics: bool | None = False\n# Host to bind the OpenTelemetry metrics server to\notel_metrics_host: str | None = localhost\n# Port for the Prometheus metrics HTTP server\notel_metrics_port: int | None = 8000\n\n# the number of activate layers in LISA\nlisa_n_layers: int | None\n# how often to switch layers in LISA\nlisa_step_interval: int | None\n# path under the model to access the layers\nlisa_layers_attribute: str | None = model.layers\n\ngradio_title: str | None\ngradio_share: bool | None\ngradio_server_name: str | None\ngradio_server_port: int | None\ngradio_max_new_tokens: int | None\ngradio_temperature: float | None\n\nuse_ray: bool = False\nray_run_name: str | None\nray_num_workers: int = 1\nresources_per_worker: dict\n\n# The size of the image to resize to. It can be an integer (resized into padded-square\n# image) or a tuple (width, height).If not provided, we will attempt to load from\n# preprocessor.size, otherwise, images won't be resized.\nimage_size: int | tuple[int, int] | None\n# The resampling algorithm to use for image resizing. Default is bilinear. 
Please refer\n# to PIL.Image.Resampling for more details.\nimage_resize_algorithm: Literal['bilinear', 'bicubic', 'lanczos'] | Resampling | None\n\n# optional overrides to the base model configuration\noverrides_of_model_config: dict[str, Any] | None\n# optional overrides to the base model loading from_pretrained\noverrides_of_model_kwargs: dict[str, Any] | None\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good\n# choice too\ntype_of_model: str | None\n# You can choose a specific model revision from huggingface hub\nrevision_of_model: str | None\n\nmax_packed_sequence_len: int | None\nrope_scaling: Any | None\nnoisy_embedding_alpha: float | None\ndpo_beta: float | None\nevaluation_strategy: str | None\neval_table_size: int | None\neval_max_new_tokens: int | None\ndpo_use_logits_to_keep: bool | None\ndpo_generate_during_eval: bool | None\ndpo_norm_loss: bool | None\nrpo_alpha: float | None",
"crumbs": [
"Getting Started",
"Config Reference"
@@ -6067,7 +6067,7 @@
"href": "docs/debugging.html#debugging-with-vscode",
"title": "Debugging",
"section": "Debugging with VSCode",
- "text": "Debugging with VSCode\n\nBackground\nThe below example shows how to configure VSCode to debug data preprocessing of the chat_template format. This is the format used when you have the following in your axolotl config:\ndatasets:\n - path: <path to your chat_template formatted dataset> # example on HF Hub: fozziethebeat/alpaca_messages_2k_test\n type: chat_template\n\n[!Important]\nIf you are already familiar with advanced VSCode debugging, you can skip the below explanation and look at the files .vscode/launch.json and .vscode/tasks.json for an example configuration.\n\n\n[!Tip]\nIf you prefer to watch a video, rather than read, you can skip to the video tutorial below (but doing both is recommended).\n\n\n\nSetup\nMake sure you have an editable install of Axolotl, which ensures that changes you make to the code are reflected at runtime. Run the following commands from the root of this project:\nexport UV_TORCH_BACKEND=cu128 # or cu130\nuv sync --extra flash-attn --extra deepspeed --group dev --group test\nsource .venv/bin/activate\n\nRemote Hosts\nIf you developing on a remote host, you can easily use VSCode to debug remotely. To do so, you will need to follow this remote - SSH guide. You can also see the video below on Docker and Remote SSH debugging.\n\n\n\nConfiguration\nThe easiest way to get started is to modify the .vscode/launch.json file in this project. This is just an example configuration, so you may need to modify or copy it to suit your needs.\nFor example, to mimic the command cd devtools && CUDA_VISIBLE_DEVICES=0 axolotl train dev_chat_template.yml, you would use the below configuration1. Note that we add additional flags that override the axolotl config and incorporate the tips above (see the comments). We also set the working directory to devtools and set the env variable HF_HOME to a temporary folder that is later partially deleted. This is because we want to delete the HF dataset cache before each run in order to ensure that the data preprocessing code is run from scratch.\n// .vscode/launch.json\n{\n \"version\": \"0.2.0\",\n \"configurations\": [\n {\n \"name\": \"Debug axolotl prompt - chat_template\",\n \"type\": \"python\",\n \"module\": \"accelerate.commands.launch\",\n \"request\": \"launch\",\n \"args\": [\n \"-m\", \"axolotl.cli.train\", \"dev_chat_template.yml\",\n // The flags below simplify debugging by overriding the axolotl config\n // with the debugging tips above. 
Modify as needed.\n \"--dataset_num_proc=1\", // limits data preprocessing to one process\n \"--max_steps=1\", // limits training to just one step\n \"--batch_size=1\", // minimizes batch size\n \"--micro_batch_size=1\", // minimizes batch size\n \"--val_set_size=0\", // disables validation\n \"--sample_packing=False\", // disables sample packing which is necessary for small datasets\n \"--eval_sample_packing=False\",// disables sample packing on eval set\n \"--dataset_prepared_path=temp_debug/axolotl_outputs/data\", // send data outputs to a temp folder\n \"--output_dir=temp_debug/axolotl_outputs/model\" // send model outputs to a temp folder\n ],\n \"console\": \"integratedTerminal\", // show output in the integrated terminal\n \"cwd\": \"${workspaceFolder}/devtools\", // set working directory to devtools from the root of the project\n \"justMyCode\": true, // step through only axolotl code\n \"env\": {\"CUDA_VISIBLE_DEVICES\": \"0\", // Since we aren't doing distributed training, we need to limit to one GPU\n \"HF_HOME\": \"${workspaceFolder}/devtools/temp_debug/.hf-cache\"}, // send HF cache to a temp folder\n \"preLaunchTask\": \"cleanup-for-dataprep\", // delete temp folders (see below)\n }\n ]\n}\nAdditional notes about this configuration:\n\nThe argument justMyCode is set to true such that you step through only the axolotl code. If you want to step into dependencies, set this to false.\nThe preLaunchTask: cleanup-for-dataprep is defined in .vscode/tasks.json and is used to delete the following folders before debugging, which is essential to ensure that the data pre-processing code is run from scratch:\n\n./devtools/temp_debug/axolotl_outputs\n./devtools/temp_debug/.hf-cache/datasets\n\n\n\n[!Tip]\nYou may not want to delete these folders. For example, if you are debugging model training instead of data pre-processing, you may NOT want to delete the cache or output folders. You may also need to add additional tasks to the tasks.json file depending on your use case.\n\nBelow is the ./vscode/tasks.json file that defines the cleanup-for-dataprep task. This task is run before each debugging session when you use the above configuration. Note how there are two tasks that delete the two folders mentioned above. The third task cleanup-for-dataprep is a composite task that combines the two tasks. A composite task is necessary because VSCode does not allow you to specify multiple tasks in the preLaunchTask argument of the launch.json file.\n// .vscode/tasks.json\n// this file is used by launch.json\n{\n \"version\": \"2.0.0\",\n \"tasks\": [\n // this task changes into the devtools directory and deletes the temp_debug/axolotl_outputs folder\n {\n \"label\": \"delete-outputs\",\n \"type\": \"shell\",\n \"command\": \"rm -rf temp_debug/axolotl_outputs\",\n \"options\":{ \"cwd\": \"${workspaceFolder}/devtools\"},\n \"problemMatcher\": []\n },\n // this task changes into the devtools directory and deletes the `temp_debug/.hf-cache/datasets` folder\n {\n \"label\": \"delete-temp-hf-dataset-cache\",\n \"type\": \"shell\",\n \"command\": \"rm -rf temp_debug/.hf-cache/datasets\",\n \"options\":{ \"cwd\": \"${workspaceFolder}/devtools\"},\n \"problemMatcher\": []\n },\n // this task combines the two tasks above\n {\n \"label\": \"cleanup-for-dataprep\",\n \"dependsOn\": [\"delete-outputs\", \"delete-temp-hf-dataset-cache\"],\n }\n ]\n}\n\n\nCustomizing your debugger\nYour debugging use case may differ from the example above. 
The easiest thing to do is to put your own axolotl config in the devtools folder and modify the launch.json file to use your config. You may also want to modify the preLaunchTask to delete different folders or not delete anything at all.\n\n\nVideo Tutorial\nThe following video tutorial walks through the above configuration and demonstrates how to debug with VSCode, (click the image below to watch):\n\n\n\nHamel Husain’s tutorial: Debugging Axolotl w/VSCode",
+ "text": "Debugging with VSCode\n\nBackground\nThe below example shows how to configure VSCode to debug data preprocessing of the chat_template format. This is the format used when you have the following in your axolotl config:\ndatasets:\n - path: <path to your chat_template formatted dataset> # example on HF Hub: fozziethebeat/alpaca_messages_2k_test\n type: chat_template\n\n[!Important]\nIf you are already familiar with advanced VSCode debugging, you can skip the below explanation and look at the files .vscode/launch.json and .vscode/tasks.json for an example configuration.\n\n\n[!Tip]\nIf you prefer to watch a video, rather than read, you can skip to the video tutorial below (but doing both is recommended).\n\n\n\nSetup\nMake sure you have an editable install of Axolotl, which ensures that changes you make to the code are reflected at runtime. Run the following commands from the root of this project:\nexport UV_TORCH_BACKEND=cu128 # or cu130\nuv venv --no-project --relocatable\nsource .venv/bin/activate\nuv pip install --no-build-isolation -e '.[deepspeed]' --group dev --group test\n\nRemote Hosts\nIf you developing on a remote host, you can easily use VSCode to debug remotely. To do so, you will need to follow this remote - SSH guide. You can also see the video below on Docker and Remote SSH debugging.\n\n\n\nConfiguration\nThe easiest way to get started is to modify the .vscode/launch.json file in this project. This is just an example configuration, so you may need to modify or copy it to suit your needs.\nFor example, to mimic the command cd devtools && CUDA_VISIBLE_DEVICES=0 axolotl train dev_chat_template.yml, you would use the below configuration1. Note that we add additional flags that override the axolotl config and incorporate the tips above (see the comments). We also set the working directory to devtools and set the env variable HF_HOME to a temporary folder that is later partially deleted. This is because we want to delete the HF dataset cache before each run in order to ensure that the data preprocessing code is run from scratch.\n// .vscode/launch.json\n{\n \"version\": \"0.2.0\",\n \"configurations\": [\n {\n \"name\": \"Debug axolotl prompt - chat_template\",\n \"type\": \"python\",\n \"module\": \"accelerate.commands.launch\",\n \"request\": \"launch\",\n \"args\": [\n \"-m\", \"axolotl.cli.train\", \"dev_chat_template.yml\",\n // The flags below simplify debugging by overriding the axolotl config\n // with the debugging tips above. 
Modify as needed.\n \"--dataset_num_proc=1\", // limits data preprocessing to one process\n \"--max_steps=1\", // limits training to just one step\n \"--batch_size=1\", // minimizes batch size\n \"--micro_batch_size=1\", // minimizes batch size\n \"--val_set_size=0\", // disables validation\n \"--sample_packing=False\", // disables sample packing which is necessary for small datasets\n \"--eval_sample_packing=False\",// disables sample packing on eval set\n \"--dataset_prepared_path=temp_debug/axolotl_outputs/data\", // send data outputs to a temp folder\n \"--output_dir=temp_debug/axolotl_outputs/model\" // send model outputs to a temp folder\n ],\n \"console\": \"integratedTerminal\", // show output in the integrated terminal\n \"cwd\": \"${workspaceFolder}/devtools\", // set working directory to devtools from the root of the project\n \"justMyCode\": true, // step through only axolotl code\n \"env\": {\"CUDA_VISIBLE_DEVICES\": \"0\", // Since we aren't doing distributed training, we need to limit to one GPU\n \"HF_HOME\": \"${workspaceFolder}/devtools/temp_debug/.hf-cache\"}, // send HF cache to a temp folder\n \"preLaunchTask\": \"cleanup-for-dataprep\", // delete temp folders (see below)\n }\n ]\n}\nAdditional notes about this configuration:\n\nThe argument justMyCode is set to true such that you step through only the axolotl code. If you want to step into dependencies, set this to false.\nThe preLaunchTask: cleanup-for-dataprep is defined in .vscode/tasks.json and is used to delete the following folders before debugging, which is essential to ensure that the data pre-processing code is run from scratch:\n\n./devtools/temp_debug/axolotl_outputs\n./devtools/temp_debug/.hf-cache/datasets\n\n\n\n[!Tip]\nYou may not want to delete these folders. For example, if you are debugging model training instead of data pre-processing, you may NOT want to delete the cache or output folders. You may also need to add additional tasks to the tasks.json file depending on your use case.\n\nBelow is the ./vscode/tasks.json file that defines the cleanup-for-dataprep task. This task is run before each debugging session when you use the above configuration. Note how there are two tasks that delete the two folders mentioned above. The third task cleanup-for-dataprep is a composite task that combines the two tasks. A composite task is necessary because VSCode does not allow you to specify multiple tasks in the preLaunchTask argument of the launch.json file.\n// .vscode/tasks.json\n// this file is used by launch.json\n{\n \"version\": \"2.0.0\",\n \"tasks\": [\n // this task changes into the devtools directory and deletes the temp_debug/axolotl_outputs folder\n {\n \"label\": \"delete-outputs\",\n \"type\": \"shell\",\n \"command\": \"rm -rf temp_debug/axolotl_outputs\",\n \"options\":{ \"cwd\": \"${workspaceFolder}/devtools\"},\n \"problemMatcher\": []\n },\n // this task changes into the devtools directory and deletes the `temp_debug/.hf-cache/datasets` folder\n {\n \"label\": \"delete-temp-hf-dataset-cache\",\n \"type\": \"shell\",\n \"command\": \"rm -rf temp_debug/.hf-cache/datasets\",\n \"options\":{ \"cwd\": \"${workspaceFolder}/devtools\"},\n \"problemMatcher\": []\n },\n // this task combines the two tasks above\n {\n \"label\": \"cleanup-for-dataprep\",\n \"dependsOn\": [\"delete-outputs\", \"delete-temp-hf-dataset-cache\"],\n }\n ]\n}\n\n\nCustomizing your debugger\nYour debugging use case may differ from the example above. 
The easiest thing to do is to put your own axolotl config in the devtools folder and modify the launch.json file to use your config. You may also want to modify the preLaunchTask to delete different folders or not delete anything at all.\n\n\nVideo Tutorial\nThe following video tutorial walks through the above configuration and demonstrates how to debug with VSCode, (click the image below to watch):\n\n\n\nHamel Husain’s tutorial: Debugging Axolotl w/VSCode",
"crumbs": [
"Troubleshooting",
"Debugging"
@@ -6078,7 +6078,7 @@
"href": "docs/debugging.html#debugging-with-docker",
"title": "Debugging",
"section": "Debugging With Docker",
- "text": "Debugging With Docker\nUsing official Axolotl Docker images is a great way to debug your code, and is a very popular way to use Axolotl. Attaching VSCode to Docker takes a few more steps.\n\nSetup\nOn the host that is running axolotl (ex: if you are using a remote host), clone the axolotl repo and change your current directory to the root:\ngit clone https://github.com/axolotl-ai-cloud/axolotl\ncd axolotl\n\n[!Tip]\nIf you already have axolotl cloned on your host, make sure you have the latest changes and change into the root of the project.\n\nNext, run the desired docker image and mount the current directory. Below is a docker command you can run to do this:2\ndocker run --privileged --gpus '\"all\"' --shm-size 10g --rm -it --name axolotl --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 --mount type=bind,src=\"${PWD}\",target=/workspace/axolotl -v ${HOME}/.cache/huggingface:/root/.cache/huggingface axolotlai/axolotl-uv:main-latest\n\n[!Tip]\nTo understand which containers are available, see the Docker section of the README and the DockerHub repo. For details of how the Docker containers are built, see axolotl’s Docker CI builds.\n\nYou will now be in the container. Next, install Axolotl with dev dependencies:\nuv sync --extra flash-attn --extra deepspeed --group dev --group test\nsource .venv/bin/activate\n\n\nAttach To Container\nNext, if you are using a remote host, Remote into this host with VSCode. If you are using a local host, you can skip this step.\nNext, select Dev Containers: Attach to Running Container... using the command palette (CMD + SHIFT + P) in VSCode. You will be prompted to select a container to attach to. Select the container you just created. You will now be in the container with a working directory that is at the root of the project. Any changes you make to the code will be reflected both in the container and on the host.\nNow you are ready to debug as described above (see Debugging with VSCode).\n\n\nVideo - Attaching To Docker On Remote Host\nHere is a short video that demonstrates how to attach to a Docker container on a remote host:\n\n\n\nHamel Husain’s tutorial: Debugging Axolotl Part 2: Attaching to Docker on a Remote Host",
+ "text": "Debugging With Docker\nUsing official Axolotl Docker images is a great way to debug your code, and is a very popular way to use Axolotl. Attaching VSCode to Docker takes a few more steps.\n\nSetup\nOn the host that is running axolotl (ex: if you are using a remote host), clone the axolotl repo and change your current directory to the root:\ngit clone https://github.com/axolotl-ai-cloud/axolotl\ncd axolotl\n\n[!Tip]\nIf you already have axolotl cloned on your host, make sure you have the latest changes and change into the root of the project.\n\nNext, run the desired docker image and mount the current directory. Below is a docker command you can run to do this:2\ndocker run --privileged --gpus '\"all\"' --shm-size 10g --rm -it --name axolotl --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 --mount type=bind,src=\"${PWD}\",target=/workspace/axolotl -v ${HOME}/.cache/huggingface:/root/.cache/huggingface axolotlai/axolotl-uv:main-latest\n\n[!Tip]\nTo understand which containers are available, see the Docker section of the README and the DockerHub repo. For details of how the Docker containers are built, see axolotl’s Docker CI builds.\n\nYou will now be in the container. Next, install Axolotl with dev dependencies:\nuv venv --no-project --relocatable\nsource .venv/bin/activate\nuv pip install --no-build-isolation -e '.[deepspeed]' --group dev --group test\n\n\nAttach To Container\nNext, if you are using a remote host, Remote into this host with VSCode. If you are using a local host, you can skip this step.\nNext, select Dev Containers: Attach to Running Container... using the command palette (CMD + SHIFT + P) in VSCode. You will be prompted to select a container to attach to. Select the container you just created. You will now be in the container with a working directory that is at the root of the project. Any changes you make to the code will be reflected both in the container and on the host.\nNow you are ready to debug as described above (see Debugging with VSCode).\n\n\nVideo - Attaching To Docker On Remote Host\nHere is a short video that demonstrates how to attach to a Docker container on a remote host:\n\n\n\nHamel Husain’s tutorial: Debugging Axolotl Part 2: Attaching to Docker on a Remote Host",
"crumbs": [
"Troubleshooting",
"Debugging"
@@ -6225,7 +6225,7 @@
"href": "docs/models/gpt-oss.html#getting-started",
"title": "GPT-OSS",
"section": "Getting started",
- "text": "Getting started\n\nInstall Axolotl following the installation guide.\nHere is an example of how to install from pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.6.0 min)\nuv pip install --no-build-isolation 'axolotl[flash-attn]>=0.12.0'\n\nChoose one of the following configs below for training the 20B model. (for 120B, see below)\n\n# LoRA SFT linear layers (1x48GB @ ~44GiB)\naxolotl train examples/gpt-oss/gpt-oss-20b-sft-lora-singlegpu.yaml\n\n# FFT SFT with offloading (2x24GB @ ~21GiB/GPU)\naxolotl train examples/gpt-oss/gpt-oss-20b-fft-fsdp2-offload.yaml\n\n# FFT SFT (8x48GB @ ~36GiB/GPU or 4x80GB @ ~46GiB/GPU)\naxolotl train examples/gpt-oss/gpt-oss-20b-fft-fsdp2.yaml\nNote: Memory usage taken from device_mem_reserved(gib) from logs.\n\nTraining 120B\nOn 8xH100s, make sure you have ~3TB of free disk space. With each checkpoint clocking in at ~720GB, along with the base\nmodel, and final model output, you may need at least 3TB of free disk space to keep at least 2 checkpoints.\n# FFT SFT with offloading (8x80GB @ ~49GiB/GPU)\naxolotl train examples/gpt-oss/gpt-oss-120b-fft-fsdp2-offload.yaml\nTo simplify fine-tuning across 2 nodes × 8x H100 (80GB) GPUs, we’ve partnered with Baseten to showcase multi-node\ntraining of the 120B model using Baseten Truss. You can read more about this recipe on\nBaseten’s blog. The recipe can\nbe found on their\nGitHub.\nERRATA: Transformers saves the model Architecture prefixed with FSDP which needs to be manually renamed in config.json.\nSee https://github.com/huggingface/transformers/pull/40207 for the status of this issue.\nsed -i 's/FSDPGptOssForCausalLM/GptOssForCausalLM/g' ./outputs/gpt-oss-out/config.json\nWhen using SHARDED_STATE_DICT with FSDP, the final checkpoint should automatically merge the sharded weights to your\nconfigured output_dir. However, if that step fails due to a disk space error, you can take an additional step to\nmerge the sharded weights. This step will automatically determine the last checkpoint directory and merge the sharded\nweights to {output_dir}/merged.\naxolotl merge-sharded-fsdp-weights examples/gpt-oss/gpt-oss-120b-fft-fsdp2-offload.yaml\nmv ./outputs/gpt-oss-out/merged/* ./outputs/gpt-oss-out/\n\n\nHow to set reasoning_effort in template?\nThe harmony template has a feature to set the reasoning_effort during prompt building. The default is medium. If you would like to adjust this, you can add the following to your config:\nchat_template_kwargs:\n reasoning_effort: \"high\" # low | medium | high\nCurrently, this applies globally. There is no method to apply per sample yet. If you are interested in adding this, please feel free to create an Issue to discuss.\n\n\nInferencing your fine-tuned model\n\nvLLM\nGPT-OSS support in vLLM does not exist in a stable release yet. See https://x.com/MaziyarPanahi/status/1955741905515323425\nfor more information about using a special vllm-openai docker image for inferencing with vLLM.\nOptionally, vLLM can be installed from nightly:\nuv pip install --no-build-isolation --pre -U vllm --extra-index-url https://wheels.vllm.ai/nightly\nand the vLLM server can be started with the following command (modify --tensor-parallel-size 8 to match your environment):\nvllm serve ./outputs/gpt-oss-out/ --served-model-name axolotl/gpt-oss-20b --host 0.0.0.0 --port 8888 --tensor-parallel-size 8\n\n\nSGLang\nSGLang has 0-day support in main, see https://github.com/sgl-project/sglang/issues/8833 for infomation on installing\nSGLang from source. 
Once you’ve installed SGLang, run the following command to launch a SGLang server:\npython3 -m sglang.launch_server --model ./outputs/gpt-oss-out/ --served-model-name axolotl/gpt-oss-120b --host 0.0.0.0 --port 8888 --tp 8\n\n\n\nTool use\nGPT-OSS has a comprehensive tool understanding. Axolotl supports tool calling datasets for Supervised Fine-tuning.\nHere is an example dataset config:\ndatasets:\n - path: Nanobit/text-tools-2k-test\n type: chat_template\nSee Nanobit/text-tools-2k-test for the sample dataset.\nRefer to our docs for more info.\n\n\nThinking and chat_template masking conflict\nOpenAI’s Harmony template hides thinking in all non-final turns, which conflicts with Axolotl’s chat_template masking.\nIf your dataset has thinking content mid-turn, there are two paths we recommend:\n\nTrain only on the last turn. This can be accomplished via chat_template’s train on last doc.\nAdjust your dataset to only have thinking content in the last turn.\n\n\n\nTIPS\n\nRead more on how to load your own dataset at docs.\nThe dataset format follows the OpenAI Messages format as seen here.",
+ "text": "Getting started\n\nInstall Axolotl following the installation guide.\nHere is an example of how to install from pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.9.1 min)\nuv pip install --no-build-isolation 'axolotl>=0.16.1'\n\nChoose one of the following configs below for training the 20B model. (for 120B, see below)\n\n# LoRA SFT linear layers (1x48GB @ ~44GiB)\naxolotl train examples/gpt-oss/gpt-oss-20b-sft-lora-singlegpu.yaml\n\n# FFT SFT with offloading (2x24GB @ ~21GiB/GPU)\naxolotl train examples/gpt-oss/gpt-oss-20b-fft-fsdp2-offload.yaml\n\n# FFT SFT (8x48GB @ ~36GiB/GPU or 4x80GB @ ~46GiB/GPU)\naxolotl train examples/gpt-oss/gpt-oss-20b-fft-fsdp2.yaml\nNote: Memory usage taken from device_mem_reserved(gib) from logs.\n\nTraining 120B\nOn 8xH100s, make sure you have ~3TB of free disk space. With each checkpoint clocking in at ~720GB, along with the base\nmodel, and final model output, you may need at least 3TB of free disk space to keep at least 2 checkpoints.\n# FFT SFT with offloading (8x80GB @ ~49GiB/GPU)\naxolotl train examples/gpt-oss/gpt-oss-120b-fft-fsdp2-offload.yaml\nTo simplify fine-tuning across 2 nodes × 8x H100 (80GB) GPUs, we’ve partnered with Baseten to showcase multi-node\ntraining of the 120B model using Baseten Truss. You can read more about this recipe on\nBaseten’s blog. The recipe can\nbe found on their\nGitHub.\nERRATA: Transformers saves the model Architecture prefixed with FSDP which needs to be manually renamed in config.json.\nSee https://github.com/huggingface/transformers/pull/40207 for the status of this issue.\nsed -i 's/FSDPGptOssForCausalLM/GptOssForCausalLM/g' ./outputs/gpt-oss-out/config.json\nWhen using SHARDED_STATE_DICT with FSDP, the final checkpoint should automatically merge the sharded weights to your\nconfigured output_dir. However, if that step fails due to a disk space error, you can take an additional step to\nmerge the sharded weights. This step will automatically determine the last checkpoint directory and merge the sharded\nweights to {output_dir}/merged.\naxolotl merge-sharded-fsdp-weights examples/gpt-oss/gpt-oss-120b-fft-fsdp2-offload.yaml\nmv ./outputs/gpt-oss-out/merged/* ./outputs/gpt-oss-out/\n\n\nHow to set reasoning_effort in template?\nThe harmony template has a feature to set the reasoning_effort during prompt building. The default is medium. If you would like to adjust this, you can add the following to your config:\nchat_template_kwargs:\n reasoning_effort: \"high\" # low | medium | high\nCurrently, this applies globally. There is no method to apply per sample yet. If you are interested in adding this, please feel free to create an Issue to discuss.\n\n\nInferencing your fine-tuned model\n\nvLLM\nGPT-OSS support in vLLM does not exist in a stable release yet. See https://x.com/MaziyarPanahi/status/1955741905515323425\nfor more information about using a special vllm-openai docker image for inferencing with vLLM.\nOptionally, vLLM can be installed from nightly:\nuv pip install --no-build-isolation --pre -U vllm --extra-index-url https://wheels.vllm.ai/nightly\nand the vLLM server can be started with the following command (modify --tensor-parallel-size 8 to match your environment):\nvllm serve ./outputs/gpt-oss-out/ --served-model-name axolotl/gpt-oss-20b --host 0.0.0.0 --port 8888 --tensor-parallel-size 8\n\n\nSGLang\nSGLang has 0-day support in main, see https://github.com/sgl-project/sglang/issues/8833 for infomation on installing\nSGLang from source. 
Once you’ve installed SGLang, run the following command to launch an SGLang server:\npython3 -m sglang.launch_server --model ./outputs/gpt-oss-out/ --served-model-name axolotl/gpt-oss-120b --host 0.0.0.0 --port 8888 --tp 8\n\n\n\nTool use\nGPT-OSS has a comprehensive understanding of tool use. Axolotl supports tool-calling datasets for Supervised Fine-tuning.\nHere is an example dataset config:\ndatasets:\n - path: Nanobit/text-tools-2k-test\n type: chat_template\nSee Nanobit/text-tools-2k-test for the sample dataset.\nRefer to our docs for more info.\n\n\nThinking and chat_template masking conflict\nOpenAI’s Harmony template hides thinking in all non-final turns, which conflicts with Axolotl’s chat_template masking.\nIf your dataset has thinking content mid-turn, there are two paths we recommend:\n\nTrain only on the last turn. This can be accomplished via chat_template’s train-on-last doc.\nAdjust your dataset to only have thinking content in the last turn.\n\n\n\nTIPS\n\nRead more on how to load your own dataset at docs.\nThe dataset format follows the OpenAI Messages format as seen here.",
"crumbs": [
"Getting Started",
"Model Guides",
@@ -6273,7 +6273,7 @@
"href": "docs/models/LiquidAI.html#getting-started",
"title": "Liquid Foundation Models 2",
"section": "Getting Started",
- "text": "Getting Started\n\nInstall Axolotl following the installation guide.\nHere is an example of how to install from pip:\n# Ensure you have a compatible version of Pytorch installed\nuv pip install --no-build-isolation 'axolotl[flash-attn]>=0.12.0'\nRun one of the finetuning examples below.\nLFM2\n# FFT SFT (1x48GB @ 25GiB)\naxolotl train examples/LiquidAI/lfm2-350m-fft.yaml\nLFM2-VL\n# LoRA SFT (1x48GB @ 2.7GiB)\naxolotl train examples/LiquidAI/lfm2-vl-lora.yaml\nLFM2-MoE\nuv pip install git+https://github.com/huggingface/transformers.git@0c9a72e4576fe4c84077f066e585129c97bfd4e6\n\n# LoRA SFT (1x48GB @ 16.2GiB)\naxolotl train examples/LiquidAI/lfm2-8b-a1b-lora.yaml\n\n\nTIPS\n\nInstallation Error: If you encounter ImportError: ... undefined symbol ... or ModuleNotFoundError: No module named 'causal_conv1d_cuda', the causal-conv1d package may have been installed incorrectly. Try uninstalling it:\nuv pip uninstall causal-conv1d\nDataset Loading: Read more on how to load your own dataset in our documentation.\nDataset Formats:\n\nFor LFM2 models, the dataset format follows the OpenAI Messages format as seen here.\nFor LFM2-VL models, Axolotl follows the multi-content Messages format. See our Multimodal docs for details.",
+ "text": "Getting Started\n\nInstall Axolotl following the installation guide.\nHere is an example of how to install from pip:\n# Ensure you have a compatible version of Pytorch installed\nuv pip install --no-build-isolation 'axolotl>=0.16.1'\nRun one of the finetuning examples below.\nLFM2\n# FFT SFT (1x48GB @ 25GiB)\naxolotl train examples/LiquidAI/lfm2-350m-fft.yaml\nLFM2-VL\n# LoRA SFT (1x48GB @ 2.7GiB)\naxolotl train examples/LiquidAI/lfm2-vl-lora.yaml\nLFM2-MoE\nuv pip install git+https://github.com/huggingface/transformers.git@0c9a72e4576fe4c84077f066e585129c97bfd4e6\n\n# LoRA SFT (1x48GB @ 16.2GiB)\naxolotl train examples/LiquidAI/lfm2-8b-a1b-lora.yaml\n\n\nTIPS\n\nInstallation Error: If you encounter ImportError: ... undefined symbol ... or ModuleNotFoundError: No module named 'causal_conv1d_cuda', the causal-conv1d package may have been installed incorrectly. Try uninstalling it:\nuv pip uninstall causal-conv1d\nDataset Loading: Read more on how to load your own dataset in our documentation.\nDataset Formats:\n\nFor LFM2 models, the dataset format follows the OpenAI Messages format as seen here.\nFor LFM2-VL models, Axolotl follows the multi-content Messages format. See our Multimodal docs for details.",
"crumbs": [
"Getting Started",
"Model Guides",
@@ -6321,7 +6321,7 @@
"href": "docs/models/granite4.html#getting-started",
"title": "Granite 4",
"section": "Getting started",
- "text": "Getting started\n\nInstall Axolotl following the installation guide. You need to install from main as Granite4 is only on nightly or use our latest Docker images.\nHere is an example of how to install from main for pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.7.1 min)\ngit clone https://github.com/axolotl-ai-cloud/axolotl.git\ncd axolotl\n\nuv pip install --no-build-isolation -e '.[flash-attn]'\n\n# Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy\npython scripts/cutcrossentropy_install.py | sh\n\nRun the finetuning example:\n\naxolotl train examples/granite4/granite-4.0-tiny-fft.yaml\nThis config uses about 40.8GiB VRAM.\nLet us know how it goes. Happy finetuning! 🚀\n\nTIPS\n\nRead more on how to load your own dataset at docs.\nThe dataset format follows the OpenAI Messages format as seen here.\n\n\n\nLimitation\nAdapter finetuning does not work at the moment. It would error with\nRuntimeError: mat1 and mat2 shapes cannot be multiplied (4096x3072 and 1x1179648)\nIn addition, if adapter training works, lora_target_linear: true will not work due to:\nValueError: Target module GraniteMoeHybridParallelExperts() is not supported.",
+ "text": "Getting started\n\nInstall Axolotl following the installation guide. You need to install from main as Granite4 is only on nightly or use our latest Docker images.\nHere is an example of how to install from main for pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.9.1 min)\ngit clone https://github.com/axolotl-ai-cloud/axolotl.git\ncd axolotl\n\nuv pip install --no-build-isolation -e '.'\n\n# Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy\npython scripts/cutcrossentropy_install.py | sh\n\nRun the finetuning example:\n\naxolotl train examples/granite4/granite-4.0-tiny-fft.yaml\nThis config uses about 40.8GiB VRAM.\nLet us know how it goes. Happy finetuning! 🚀\n\nTIPS\n\nRead more on how to load your own dataset at docs.\nThe dataset format follows the OpenAI Messages format as seen here.\n\n\n\nLimitation\nAdapter finetuning does not work at the moment. It would error with\nRuntimeError: mat1 and mat2 shapes cannot be multiplied (4096x3072 and 1x1179648)\nIn addition, if adapter training works, lora_target_linear: true will not work due to:\nValueError: Target module GraniteMoeHybridParallelExperts() is not supported.",
"crumbs": [
"Getting Started",
"Model Guides",
@@ -6369,7 +6369,7 @@
"href": "docs/models/voxtral.html#getting-started",
"title": "Voxtral",
"section": "Getting started",
- "text": "Getting started\n\nInstall Axolotl following the installation guide.\nHere is an example of how to install from pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.6.0 min)\nuv pip install --no-build-isolation 'axolotl[flash-attn]>=0.12.0'\n\nPlease install the below.\n\n# audio\nuv pip install librosa==0.11.0\nuv pip install 'mistral_common[audio]==1.8.3'\n\n# Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy\npython scripts/cutcrossentropy_install.py | sh\n\nDownload sample dataset files\n\n# for text + audio only\nwget https://huggingface.co/datasets/Nanobit/text-audio-2k-test/resolve/main/En-us-African_elephant.oga\n\nRun the finetuning example:\n\n# text only\naxolotl train examples/voxtral/voxtral-mini-qlora.yml\n\n# text + audio\naxolotl train examples/voxtral/voxtral-mini-audio-qlora.yml\nThese configs use about 4.8 GB VRAM.\nLet us know how it goes. Happy finetuning! 🚀\n\nTIPS\n\nFor inference, the official MistralAI team recommends temperature: 0.2 and top_p: 0.95 for audio understanding and temperature: 0.0 for transcription.\nYou can run a full finetuning by removing the adapter: qlora and load_in_4bit: true from the config.\nRead more on how to load your own dataset at docs.\nThe text dataset format follows the OpenAI Messages format as seen here.\nThe multimodal dataset format follows the OpenAI multi-content Messages format as seen here.",
+ "text": "Getting started\n\nInstall Axolotl following the installation guide.\nHere is an example of how to install from pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.9.1 min)\nuv pip install --no-build-isolation 'axolotl>=0.16.1'\n\nPlease install the below.\n\n# audio\nuv pip install librosa==0.11.0\nuv pip install 'mistral_common[audio]==1.8.3'\n\n# Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy\npython scripts/cutcrossentropy_install.py | sh\n\nDownload sample dataset files\n\n# for text + audio only\nwget https://huggingface.co/datasets/Nanobit/text-audio-2k-test/resolve/main/En-us-African_elephant.oga\n\nRun the finetuning example:\n\n# text only\naxolotl train examples/voxtral/voxtral-mini-qlora.yml\n\n# text + audio\naxolotl train examples/voxtral/voxtral-mini-audio-qlora.yml\nThese configs use about 4.8 GB VRAM.\nLet us know how it goes. Happy finetuning! 🚀\n\nTIPS\n\nFor inference, the official MistralAI team recommends temperature: 0.2 and top_p: 0.95 for audio understanding and temperature: 0.0 for transcription.\nYou can run a full finetuning by removing the adapter: qlora and load_in_4bit: true from the config.\nRead more on how to load your own dataset at docs.\nThe text dataset format follows the OpenAI Messages format as seen here.\nThe multimodal dataset format follows the OpenAI multi-content Messages format as seen here.",
"crumbs": [
"Getting Started",
"Model Guides",
@@ -6741,7 +6741,7 @@
"href": "docs/models/magistral.html#getting-started",
"title": "Magistral",
"section": "Getting started",
- "text": "Getting started\n\nInstall Axolotl following the installation guide.\nHere is an example of how to install from pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.7.0 min)\nuv pip install --no-build-isolation 'axolotl[flash-attn]>=0.12.0'\n\nInstall Cut Cross Entropy to reduce training VRAM usage\n\npython scripts/cutcrossentropy_install.py | sh\n\nRun the finetuning example:\n\naxolotl train examples/magistral/magistral-small-qlora.yaml\nThis config uses about 24GB VRAM.\nLet us know how it goes. Happy finetuning! 🚀\n\nThinking\nMistralAI has released their 2507 model with thinking capabilities, enabling Chain-of-Thought reasoning with explicit thinking steps.\n📚 See the Thinking fine-tuning guide →\n\n\nVision\nMistralAI has released their 2509 model with vision capabilities.\n📚 See the Vision fine-tuning guide →\n\n\nTips\n\nWe recommend adding the same/similar SystemPrompt that the model is tuned for. You can find this within the repo’s files titled SYSTEM_PROMPT.txt.\nFor inference, the official MistralAI team recommends top_p: 0.95 and temperature: 0.7 with max_tokens: 40960.\nYou can run a full finetuning by removing the adapter: qlora and load_in_4bit: true from the config.\nRead more on how to load your own dataset at docs.\nThe text dataset format follows the OpenAI Messages format as seen here.",
+ "text": "Getting started\n\nInstall Axolotl following the installation guide.\nHere is an example of how to install from pip:\n\n# Ensure you have Pytorch installed (Pytorch 2.9.1 min)\nuv pip install --no-build-isolation 'axolotl>=0.16.1'\n\nInstall Cut Cross Entropy to reduce training VRAM usage\n\npython scripts/cutcrossentropy_install.py | sh\n\nRun the finetuning example:\n\naxolotl train examples/magistral/magistral-small-qlora.yaml\nThis config uses about 24GB VRAM.\nLet us know how it goes. Happy finetuning! 🚀\n\nThinking\nMistralAI has released their 2507 model with thinking capabilities, enabling Chain-of-Thought reasoning with explicit thinking steps.\n📚 See the Thinking fine-tuning guide →\n\n\nVision\nMistralAI has released their 2509 model with vision capabilities.\n📚 See the Vision fine-tuning guide →\n\n\nTips\n\nWe recommend adding the same/similar SystemPrompt that the model is tuned for. You can find this within the repo’s files titled SYSTEM_PROMPT.txt.\nFor inference, the official MistralAI team recommends top_p: 0.95 and temperature: 0.7 with max_tokens: 40960.\nYou can run a full finetuning by removing the adapter: qlora and load_in_4bit: true from the config.\nRead more on how to load your own dataset at docs.\nThe text dataset format follows the OpenAI Messages format as seen here.",
"crumbs": [
"Getting Started",
"Model Guides",
diff --git a/sitemap.xml b/sitemap.xml
index 8616d6e61..119f5d7dc 100644
--- a/sitemap.xml
+++ b/sitemap.xml
@@ -2,982 +2,982 @@
 <url>
   <loc>https://docs.axolotl.ai/FAQS.html</loc>
-  <lastmod>2026-04-23T04:27:41.732Z</lastmod>
+  <lastmod>2026-04-24T09:04:04.827Z</lastmod>
 </url>
 <url>
   <loc>https://docs.axolotl.ai/docs/qat.html</loc>
-  <lastmod>2026-04-23T04:27:41.738Z</lastmod>
+  <lastmod>2026-04-24T09:04:04.836Z</lastmod>
 </url>
[… the remaining <url> entries in this hunk, through https://docs.axolotl.ai/index.html, follow the same pattern: each <lastmod> is bumped from the 2026-04-23T04:27–04:31Z build to the 2026-04-24T09:04–09:07Z build, with every <loc> unchanged …]