diff --git a/.nojekyll b/.nojekyll
index 319a21780..d619d23c5 100644
--- a/.nojekyll
+++ b/.nojekyll
@@ -1 +1 @@
-54f9d0fe
\ No newline at end of file
+10c27839
\ No newline at end of file
diff --git a/docs/config-reference.html b/docs/config-reference.html
index cc098cfee..40876ec96 100644
--- a/docs/config-reference.html
+++ b/docs/config-reference.html
@@ -1550,264 +1550,268 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});
 # Trust remote code for untrusted source
 trust_remote_code: bool | None
-# Where to save the full-finetuned model to
-output_dir: str = ./model-out
-# push checkpoints to hub
-hub_model_id: str | None
-# how to push checkpoints to hub
-hub_strategy: str | None
-# Save model as safetensors (require safetensors package). Default True
-save_safetensors: bool | None = True
-
-# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer
-load_in_8bit: bool | None = False
-# Use bitsandbytes 4 bit
-load_in_4bit: bool | None = False
-
-# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in
-# original model
-adapter: str | None
-# If you already have a lora model trained that you want to load, put that here. This
-# means after training, if you want to test the model, you should set this to the value
-# of `output_dir`. Note that if you merge an adapter to the base model, a new
-# subdirectory `merged` will be created under the `output_dir`.
-lora_model_dir: str | None
-lora_r: int | None
-lora_alpha: int | None
-lora_fan_in_fan_out: bool | None
-lora_target_modules: str | list[str] | None
-lora_target_parameters: str | list[str] | None
-# If true, will target all linear modules
-lora_target_linear: bool | None
-# If you added new tokens to the tokenizer, you may need to save some LoRA modules
-# because they need to know the new tokens. For LLaMA and Mistral, you need to save
-# `embed_tokens` and `lm_head`. It may vary for other models. `embed_tokens` converts
-# tokens to embeddings, and `lm_head` converts embeddings to token probabilities.
-lora_modules_to_save: list[str] | None
-lora_dropout: float | None = 0.0
-# The layer indices to transform, otherwise, apply to all layers
-peft_layers_to_transform: list[int] | None
-peft_layers_pattern: list[str] | None
-
-peft: PeftConfig | None
- # For PeftConfig:
- # Configuration options for loftq initialization for LoRA
- loftq_config: LoftQConfig | None
- # For LoftQConfig:
- # typically 4 bits
- loftq_bits: int = 4
-
-# Whether to use DoRA.
-peft_use_dora: bool | None
-# Whether to use RSLoRA.
-peft_use_rslora: bool | None
-# List of layer indices to replicate.
-peft_layer_replication: list[tuple[int, int]] | None
-# How to initialize LoRA weights. Default to True which is MS original implementation.
-peft_init_lora_weights: bool | str | None
-
-# load qlora model in sharded format for FSDP using answer.ai technique.
-qlora_sharded_model_loading: bool | None = False
-# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it
-# takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge
-lora_on_cpu: bool | None
-# Whether you are training a 4-bit GPTQ quantized model
-gptq: bool | None
-# optional overrides to the bnb 4bit quantization configuration
-bnb_config_kwargs: dict[str, Any] | None
-
-# loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.
-loraplus_lr_ratio: float | None
-# loraplus learning rate for lora embedding layers. Default value is 1e-6.
-loraplus_lr_embedding: float | None = 1e-06 - -merge_lora: bool | None - -# Whether to use ReLoRA. Use with jagged_restart_*steps options. -relora: bool | None -# threshold for optimizer magnitude when pruning -relora_prune_ratio: float | None -# True to perform lora weight merges on cpu during restarts, for modest gpu memory -# savings -relora_cpu_offload: bool | None - -# how often to reset for jagged restarts -jagged_restart_steps: int | None -# how many warmup steps to take after reset for jagged restarts -jagged_restart_warmup_steps: int | None -# how many anneal steps to take before reset for jagged restarts -jagged_restart_anneal_steps: int | None - -# If greater than 1, backpropagation will be skipped and the gradients will be -# accumulated for the given number of steps. -gradient_accumulation_steps: int | None = 1 -# The number of samples to include in each batch. This is the number of samples sent to -# each GPU. Batch size per gpu = micro_batch_size * gradient_accumulation_steps -micro_batch_size: int | None = 1 -# Total batch size, we do not recommended setting this manually -batch_size: int | None -# per gpu micro batch size for evals, defaults to value of micro_batch_size -eval_batch_size: int | None - -# whether to find batch size that fits in memory. Passed to underlying transformers -# Trainer -auto_find_batch_size: bool | None +# Don't move the model to the device before sharding. This is an experimental feature +# that may be included in the future as the default. +experimental_skip_move_to_device: bool | None + +# Where to save the full-finetuned model to +output_dir: str = ./model-out +# push checkpoints to hub +hub_model_id: str | None +# how to push checkpoints to hub +hub_strategy: str | None +# Save model as safetensors (require safetensors package). Default True +save_safetensors: bool | None = True + +# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer +load_in_8bit: bool | None = False +# Use bitsandbytes 4 bit +load_in_4bit: bool | None = False + +# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in +# original model +adapter: str | None +# If you already have a lora model trained that you want to load, put that here. This +# means after training, if you want to test the model, you should set this to the value +# of `output_dir`. Note that if you merge an adapter to the base model, a new +# subdirectory `merged` will be created under the `output_dir`. +lora_model_dir: str | None +lora_r: int | None +lora_alpha: int | None +lora_fan_in_fan_out: bool | None +lora_target_modules: str | list[str] | None +lora_target_parameters: str | list[str] | None +# If true, will target all linear modules +lora_target_linear: bool | None +# If you added new tokens to the tokenizer, you may need to save some LoRA modules +# because they need to know the new tokens. For LLaMA and Mistral, you need to save +# `embed_tokens` and `lm_head`. It may vary for other models. `embed_tokens` converts +# tokens to embeddings, and `lm_head` converts embeddings to token probabilities. 
+lora_modules_to_save: list[str] | None +lora_dropout: float | None = 0.0 +# The layer indices to transform, otherwise, apply to all layers +peft_layers_to_transform: list[int] | None +peft_layers_pattern: list[str] | None + +peft: PeftConfig | None + # For PeftConfig: + # Configuration options for loftq initialization for LoRA + loftq_config: LoftQConfig | None + # For LoftQConfig: + # typically 4 bits + loftq_bits: int = 4 + +# Whether to use DoRA. +peft_use_dora: bool | None +# Whether to use RSLoRA. +peft_use_rslora: bool | None +# List of layer indices to replicate. +peft_layer_replication: list[tuple[int, int]] | None +# How to initialize LoRA weights. Default to True which is MS original implementation. +peft_init_lora_weights: bool | str | None + +# load qlora model in sharded format for FSDP using answer.ai technique. +qlora_sharded_model_loading: bool | None = False +# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it +# takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge +lora_on_cpu: bool | None +# Whether you are training a 4-bit GPTQ quantized model +gptq: bool | None +# optional overrides to the bnb 4bit quantization configuration +bnb_config_kwargs: dict[str, Any] | None + +# loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4. +loraplus_lr_ratio: float | None +# loraplus learning rate for lora embedding layers. Default value is 1e-6. +loraplus_lr_embedding: float | None = 1e-06 + +merge_lora: bool | None + +# Whether to use ReLoRA. Use with jagged_restart_*steps options. +relora: bool | None +# threshold for optimizer magnitude when pruning +relora_prune_ratio: float | None +# True to perform lora weight merges on cpu during restarts, for modest gpu memory +# savings +relora_cpu_offload: bool | None + +# how often to reset for jagged restarts +jagged_restart_steps: int | None +# how many warmup steps to take after reset for jagged restarts +jagged_restart_warmup_steps: int | None +# how many anneal steps to take before reset for jagged restarts +jagged_restart_anneal_steps: int | None + +# If greater than 1, backpropagation will be skipped and the gradients will be +# accumulated for the given number of steps. +gradient_accumulation_steps: int | None = 1 +# The number of samples to include in each batch. This is the number of samples sent to +# each GPU. Batch size per gpu = micro_batch_size * gradient_accumulation_steps +micro_batch_size: int | None = 1 +# Total batch size, we do not recommended setting this manually +batch_size: int | None +# per gpu micro batch size for evals, defaults to value of micro_batch_size +eval_batch_size: int | None -# Whether to mask out or include the human's prompt from the training labels -train_on_inputs: bool | None = False -# Group similarly sized data to minimize padding. May be slower to start, as it must -# download and sort the entire dataset. Note that training loss may have an oscillating -# pattern with this enabled. -group_by_length: bool | None - -learning_rate: str | float (required) -embedding_lr: float | None -embedding_lr_scale: float | None -# Specify weight decay -weight_decay: float | None = 0.0 -# Specify optimizer -optimizer: OptimizerNames | CustomSupportedOptimizers | None = OptimizerNames.ADAMW_TORCH_FUSED -# Dictionary of arguments to pass to the optimizer -optim_args: str | dict[str, Any] | None -# The target modules to optimize, i.e. 
the module names that you would like to train, -# right now this is used only for GaLore algorithm -optim_target_modules: list[str] | Literal['all_linear'] | None -# Path to torch distx for optim 'adamw_anyprecision' -torchdistx_path: str | None -lr_scheduler: SchedulerType | Literal['one_cycle'] | Literal['rex'] | None = SchedulerType.COSINE -# Specify a scheduler and kwargs to use with the optimizer -lr_scheduler_kwargs: dict[str, Any] | None -lr_quadratic_warmup: bool | None -# decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of -# peak lr -cosine_min_lr_ratio: float | None -# freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means -# start cosine_min_lr at 80% of training step -cosine_constant_lr_ratio: float | None -# Learning rate div factor -lr_div_factor: float | None - -lr_groups: list[LrGroup] | None - # For LrGroup: - name: str (required) - modules: list[str] (required) - lr: float (required) - -# adamw hyperparams -adam_epsilon: float | None -# only used for CAME Optimizer -adam_epsilon2: float | None +# whether to find batch size that fits in memory. Passed to underlying transformers +# Trainer +auto_find_batch_size: bool | None + +# Whether to mask out or include the human's prompt from the training labels +train_on_inputs: bool | None = False +# Group similarly sized data to minimize padding. May be slower to start, as it must +# download and sort the entire dataset. Note that training loss may have an oscillating +# pattern with this enabled. +group_by_length: bool | None + +learning_rate: str | float (required) +embedding_lr: float | None +embedding_lr_scale: float | None +# Specify weight decay +weight_decay: float | None = 0.0 +# Specify optimizer +optimizer: OptimizerNames | CustomSupportedOptimizers | None = OptimizerNames.ADAMW_TORCH_FUSED +# Dictionary of arguments to pass to the optimizer +optim_args: str | dict[str, Any] | None +# The target modules to optimize, i.e. the module names that you would like to train, +# right now this is used only for GaLore algorithm +optim_target_modules: list[str] | Literal['all_linear'] | None +# Path to torch distx for optim 'adamw_anyprecision' +torchdistx_path: str | None +lr_scheduler: SchedulerType | Literal['one_cycle'] | Literal['rex'] | None = SchedulerType.COSINE +# Specify a scheduler and kwargs to use with the optimizer +lr_scheduler_kwargs: dict[str, Any] | None +lr_quadratic_warmup: bool | None +# decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of +# peak lr +cosine_min_lr_ratio: float | None +# freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means +# start cosine_min_lr at 80% of training step +cosine_constant_lr_ratio: float | None +# Learning rate div factor +lr_div_factor: float | None + +lr_groups: list[LrGroup] | None + # For LrGroup: + name: str (required) + modules: list[str] (required) + lr: float (required) + # adamw hyperparams -adam_beta1: float | None -# adamw hyperparams -adam_beta2: float | None -# only used for CAME Optimizer -adam_beta3: float | None - -# Dion Optimizer learning rate -dion_lr: float | None -# Dion Optimizer momentum -dion_momentum: float | None -# Dion Optimizer: r/d fraction for low-rank approximation. Used to compute the low-rank -# dimension. -dion_rank_fraction: float | None = 1.0 -# Dion Optimizer: Round up the low-rank dimension to a multiple of this number. This may -# be useful to ensure even sharding. 
-dion_rank_multiple_of: int | None = 1 - -# Gradient clipping max norm -max_grad_norm: float | None -num_epochs: float = 1.0 +adam_epsilon: float | None +# only used for CAME Optimizer +adam_epsilon2: float | None +# adamw hyperparams +adam_beta1: float | None +# adamw hyperparams +adam_beta2: float | None +# only used for CAME Optimizer +adam_beta3: float | None + +# Dion Optimizer learning rate +dion_lr: float | None +# Dion Optimizer momentum +dion_momentum: float | None +# Dion Optimizer: r/d fraction for low-rank approximation. Used to compute the low-rank +# dimension. +dion_rank_fraction: float | None = 1.0 +# Dion Optimizer: Round up the low-rank dimension to a multiple of this number. This may +# be useful to ensure even sharding. +dion_rank_multiple_of: int | None = 1 -use_wandb: bool | None -# Set the name of your wandb run -wandb_name: str | None -# Set the ID of your wandb run -wandb_run_id: str | None -# "offline" to save run metadata locally and not sync to the server, "disabled" to turn -# off wandb -wandb_mode: str | None -# Your wandb project name -wandb_project: str | None -# A wandb Team name if using a Team -wandb_entity: str | None -wandb_watch: str | None -# "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only -# at the end of training -wandb_log_model: str | None - -use_mlflow: bool | None -# URI to mlflow -mlflow_tracking_uri: str | None -# Your experiment name -mlflow_experiment_name: str | None -# Your run name -mlflow_run_name: str | None -# set to true to copy each saved checkpoint on each save to mlflow artifact registry -hf_mlflow_log_artifacts: bool | None - -# Enable or disable Comet integration. -use_comet: bool | None -# API key for Comet. Recommended to set via `comet login`. -comet_api_key: str | None -# Workspace name in Comet. Defaults to the user's default workspace. -comet_workspace: str | None -# Project name in Comet. Defaults to Uncategorized. -comet_project_name: str | None -# Identifier for the experiment. Used to append data to an existing experiment or -# control the key of new experiments. Default to a random key. -comet_experiment_key: str | None -# Create a new experiment ("create") or log to an existing one ("get"). Default -# ("get_or_create") auto-selects based on configuration. -comet_mode: str | None -# Set to True to log data to Comet server, or False for offline storage. Default is -# True. -comet_online: bool | None -# Dictionary for additional configuration settings, see the doc for more details. -comet_experiment_config: dict[str, Any] | None - -# the number of activate layers in LISA -lisa_n_layers: int | None -# how often to switch layers in LISA -lisa_step_interval: int | None -# path under the model to access the layers -lisa_layers_attribute: str | None = model.layers - -gradio_title: str | None -gradio_share: bool | None -gradio_server_name: str | None -gradio_server_port: int | None -gradio_max_new_tokens: int | None -gradio_temperature: float | None - -use_ray: bool = False -ray_run_name: str | None -ray_num_workers: int = 1 -resources_per_worker: dict - -# The size of the image to resize to. It can be an integer (resized into padded-square -# image) or a tuple (width, height).If not provided, we will attempt to load from -# preprocessor.size, otherwise, images won't be resized. -image_size: int | tuple[int, int] | None -# The resampling algorithm to use for image resizing. Default is bilinear. Please refer -# to PIL.Image.Resampling for more details. 
-image_resize_algorithm: Literal['bilinear', 'bicubic', 'lanczos'] | Resampling | None - -# optional overrides to the base model configuration -overrides_of_model_config: dict[str, Any] | None -# optional overrides the base model loading from_pretrained -overrides_of_model_kwargs: dict[str, Any] | None -# If you want to specify the type of model to load, AutoModelForCausalLM is a good -# choice too -type_of_model: str | None -# You can specify to choose a specific model revision from huggingface hub -revision_of_model: str | None - -max_packed_sequence_len: int | None -rope_scaling: Any | None -noisy_embedding_alpha: float | None -dpo_beta: float | None -evaluation_strategy: str | None +# Gradient clipping max norm +max_grad_norm: float | None +num_epochs: float = 1.0 + +use_wandb: bool | None +# Set the name of your wandb run +wandb_name: str | None +# Set the ID of your wandb run +wandb_run_id: str | None +# "offline" to save run metadata locally and not sync to the server, "disabled" to turn +# off wandb +wandb_mode: str | None +# Your wandb project name +wandb_project: str | None +# A wandb Team name if using a Team +wandb_entity: str | None +wandb_watch: str | None +# "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only +# at the end of training +wandb_log_model: str | None + +use_mlflow: bool | None +# URI to mlflow +mlflow_tracking_uri: str | None +# Your experiment name +mlflow_experiment_name: str | None +# Your run name +mlflow_run_name: str | None +# set to true to copy each saved checkpoint on each save to mlflow artifact registry +hf_mlflow_log_artifacts: bool | None + +# Enable or disable Comet integration. +use_comet: bool | None +# API key for Comet. Recommended to set via `comet login`. +comet_api_key: str | None +# Workspace name in Comet. Defaults to the user's default workspace. +comet_workspace: str | None +# Project name in Comet. Defaults to Uncategorized. +comet_project_name: str | None +# Identifier for the experiment. Used to append data to an existing experiment or +# control the key of new experiments. Default to a random key. +comet_experiment_key: str | None +# Create a new experiment ("create") or log to an existing one ("get"). Default +# ("get_or_create") auto-selects based on configuration. +comet_mode: str | None +# Set to True to log data to Comet server, or False for offline storage. Default is +# True. +comet_online: bool | None +# Dictionary for additional configuration settings, see the doc for more details. +comet_experiment_config: dict[str, Any] | None + +# the number of activate layers in LISA +lisa_n_layers: int | None +# how often to switch layers in LISA +lisa_step_interval: int | None +# path under the model to access the layers +lisa_layers_attribute: str | None = model.layers + +gradio_title: str | None +gradio_share: bool | None +gradio_server_name: str | None +gradio_server_port: int | None +gradio_max_new_tokens: int | None +gradio_temperature: float | None + +use_ray: bool = False +ray_run_name: str | None +ray_num_workers: int = 1 +resources_per_worker: dict + +# The size of the image to resize to. It can be an integer (resized into padded-square +# image) or a tuple (width, height).If not provided, we will attempt to load from +# preprocessor.size, otherwise, images won't be resized. +image_size: int | tuple[int, int] | None +# The resampling algorithm to use for image resizing. Default is bilinear. Please refer +# to PIL.Image.Resampling for more details. 
+image_resize_algorithm: Literal['bilinear', 'bicubic', 'lanczos'] | Resampling | None + +# optional overrides to the base model configuration +overrides_of_model_config: dict[str, Any] | None +# optional overrides the base model loading from_pretrained +overrides_of_model_kwargs: dict[str, Any] | None +# If you want to specify the type of model to load, AutoModelForCausalLM is a good +# choice too +type_of_model: str | None +# You can specify to choose a specific model revision from huggingface hub +revision_of_model: str | None + +max_packed_sequence_len: int | None +rope_scaling: Any | None +noisy_embedding_alpha: float | None +dpo_beta: float | None +evaluation_strategy: str | None diff --git a/search.json b/search.json index 95645361c..7f1ece496 100644 --- a/search.json +++ b/search.json @@ -1306,7 +1306,7 @@ "href": "docs/config-reference.html", "title": "Config Reference", "section": "", - "text": "# Allow overwrite yml config using from cli\nstrict: bool | None = False\n# Resume from a specific checkpoint dir\nresume_from_checkpoint: str | None\n# If resume_from_checkpoint isn't set and you simply want it to start where it left off.\n# Be careful with this being turned on between different models.\nauto_resume_from_checkpoints: bool | None\n# Resize the model embeddings when new tokens are added to multiples of 32. This is\n# reported to improve training speed on some models\nresize_token_embeddings_to_32x: bool | None\nmean_resizing_embeddings: bool | None = False\n\n# Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.\nshrink_embeddings: bool | None\n# Don't upcast the embeddings to float32 when using PEFT. Useful for low-VRAM GPUs\nembeddings_skip_upcast: bool | None\n\n# Use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo'\nrl: RLType | None\n\ntrl: TRLConfig | None\n # For TRLConfig:\n # Beta parameter for the RL training. Same as `rl_beta`. Use\n beta: float | None\n # Maximum length of the completion for RL training.\n max_completion_length: int | None\n\n # Whether to use VLLM for RL training.\n use_vllm: bool = False\n # VLLM mode to use, one of 'server' or 'colocate'\n vllm_mode: Literal['server', 'colocate'] | None\n # Host of the vLLM server to connect to.\n vllm_server_host: str | None = 0.0.0.0\n # Port of the vLLM server to connect to.\n vllm_server_port: int | None = 8000\n # Total timeout (in seconds) to wait for the vLLM server to respond.\n vllm_server_timeout: int | None\n # Regex for vLLM guided decoding.\n vllm_guided_decoding_regex: str | None\n\n # List of reward functions to load. Paths must be importable from current dir.\n reward_funcs: list[str] | None\n # List of reward weights for the reward functions.\n reward_weights: list[float] | None\n # Number of generations to sample.\n num_generations: int | None\n # Whether to log completions.\n log_completions: bool | None = False\n # Number of completions to print when log_completions is True.\n num_completions_to_print: int | None\n # Controls whether importance sampling ratios are computed at the `'token'` or\n # `'sequence'` level. 
For GSPO, use `sequence`, default is None which corresponds to\n # the original GRPO paper.\n importance_sampling_level: Literal['sequence', 'token'] | None\n\n # Whether to sync the reference model.\n sync_ref_model: bool | None = False\n # Mixup alpha for the reference model.\n ref_model_mixup_alpha: float | None = 0.9\n # Sync steps for the reference model.\n ref_model_sync_steps: int | None = 64\n # Whether to scale rewards by their standard deviation.\n scale_rewards: bool = True\n\n # Sampling temperature for the GRPO policy.\n temperature: float | None\n # Top-p sampling probability for the generation policy.\n top_p: float | None\n # Top-k sampling for the generation policy.\n top_k: int | None\n # Minimum probability for the generation policy.\n min_p: float | None\n # Penalty for tokens that appear in prompt and generated text.\n repetition_penalty: float | None\n # Number of iterations per batch (μ) for GRPO.\n num_iterations: int | None\n # Epsilon value for clipping in the GRPO algorithm.\n epsilon: float | None\n # Upper-bound epsilon value for clipping in the GRPO algorithm.\n epsilon_high: float | None\n # Whether to use Liger loss for GRPO.\n use_liger_loss: bool | None\n # Loss formulation to use. Supported values: grpo, bnpo, dr_grpo.\n loss_type: str | None\n # Whether to exclude truncated completions from loss calculation.\n mask_truncated_completions: bool = False\n\nvllm: VllmConfig | None\n # For VllmConfig:\n # Device to use for VLLM\n device: str | None = auto\n # Tensor parallel size for VLLM\n tensor_parallel_size: int | None\n # Data parallel size for VLLM\n data_parallel_size: int | None\n # GPU memory utilization for VLLM\n gpu_memory_utilization: float | None = 0.9\n # Data type for VLLM\n dtype: str | None = auto\n # Maximum length of the model context for VLLM\n max_model_len: int | None\n # Enable prefix caching for VLLM\n enable_prefix_caching: bool | None\n # Host for the vLLM server to start on\n host: str | None = 0.0.0.0\n # Port of the vLLM server to start on\n port: int | None = 8000\n\n # Enable reasoning for VLLM\n enable_reasoning: bool | None\n # Reasoning parser for VLLM\n reasoning_parser: str | None\n\nqat: QATConfig | None\n # For QATConfig:\n # Fake quantization layout to use for activation quantization. Valid options are\n # \"int4\" and \"int8\"\n activation_dtype: TorchIntDType | None\n # Fake quantization layout to use for weight quantization. Valid options are \"int4\"\n # and \"int8\"\n weight_dtype: TorchIntDType = TorchIntDType.int8\n # Quantize embedding\n quantize_embedding: bool | None = False\n # The number of elements in each group for per-group fake quantization\n group_size: int | None = 32\n # The number of steps to apply fake quantization after\n fake_quant_after_n_steps: int | None\n\nquantization: PTQConfig | None\n # For PTQConfig:\n # Fake quantization layout to use for weight quantization. Valid options are uintX for\n # X in [1, 2, 3, 4, 5, 6, 7], or int4, or int8\n weight_dtype: TorchIntDType = TorchIntDType.int8\n # Fake quantization layout to use for activation quantization. 
Valid options are\n # \"int4\" and \"int8\"\n activation_dtype: TorchIntDType | None\n # Whether to quantize the embedding layer.\n quantize_embedding: bool | None\n # The number of elements in each group for per-group fake quantization\n group_size: int | None = 32\n\n# Reward modelling: `True` or `False`\nreward_model: bool | None\n# Process reward modelling: `True` or `False`\nprocess_reward_model: bool | None\nnum_labels: int | None\n\n# Whether to perform weighting in DPO trainer\ndpo_use_weighting: bool | None\ndpo_use_logits_to_keep: bool | None\ndpo_label_smoothing: float | None\ndpo_norm_loss: bool | None\ndpo_padding_free: bool | None\ndpo_generate_during_eval: bool | None\n\n# A list of one or more datasets to finetune the model with\ndatasets: Annotated[list[SFTDataset | DPODataset | KTODataset | StepwiseSupervisedDataset], MinLen(1)] | None\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). 
Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. 
This parameter is ignored for local datasets.\n revision: str | None\n\n # For DPODataset:\n path: str | None\n split: str | None\n type: UserDefinedDPOType | str | None\n # For UserDefinedDPOType:\n field_system: str | None\n field_prompt: str | None\n field_chosen: str | None\n field_rejected: str | None\n prompt_format: str | None\n chosen_format: str | None\n rejected_format: str | None\n data_files: list[str] | None\n revision: str | None\n field_messages: str | None\n\n # For KTODataset:\n path: str | None\n split: str | None\n type: UserDefinedKTOType | str | None\n # For UserDefinedKTOType:\n field_system: str | None\n field_prompt: str | None\n field_completion: str | None\n field_label: bool | None\n prompt_format: str | None\n completion_format: str | None\n data_files: list[str] | None\n trust_remote_code: bool | None = False\n revision: str | None\n\n # For StepwiseSupervisedDataset:\n path: str | None\n split: str | None\n data_files: list[str] | None\n revision: str | None\n step_separator: str | None\n max_completion_length: int | None\n train_on_last_step_only: bool | None\n\n# A list of one or more datasets to eval the model with. You can use either\n# test_datasets, or val_set_size, but not both.\ntest_datasets: Annotated[list[SFTDataset | DPODataset | KTODataset | StepwiseSupervisedDataset], MinLen(1)] | None\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. 
Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. 
This parameter is ignored for local datasets.\n revision: str | None\n\n # For DPODataset:\n path: str | None\n split: str | None\n type: UserDefinedDPOType | str | None\n # For UserDefinedDPOType:\n field_system: str | None\n field_prompt: str | None\n field_chosen: str | None\n field_rejected: str | None\n prompt_format: str | None\n chosen_format: str | None\n rejected_format: str | None\n data_files: list[str] | None\n revision: str | None\n field_messages: str | None\n\n # For KTODataset:\n path: str | None\n split: str | None\n type: UserDefinedKTOType | str | None\n # For UserDefinedKTOType:\n field_system: str | None\n field_prompt: str | None\n field_completion: str | None\n field_label: bool | None\n prompt_format: str | None\n completion_format: str | None\n data_files: list[str] | None\n trust_remote_code: bool | None = False\n revision: str | None\n\n # For StepwiseSupervisedDataset:\n path: str | None\n split: str | None\n data_files: list[str] | None\n revision: str | None\n step_separator: str | None\n max_completion_length: int | None\n train_on_last_step_only: bool | None\n\n# If false, the datasets will not be shuffled and will keep their original order in\n# `datasets`. The same applies to the `test_datasets` option and the\n# `pretraining_dataset` option. Default is true.\nshuffle_merged_datasets: bool | None = True\n# If true, each dataset in `datasets` will be shuffled before merging. This allows\n# curriculum learning strategies to be applied at the dataset level. Default is false.\nshuffle_before_merging_datasets: bool | None = False\n# Axolotl attempts to save the dataset as an arrow after packing the data together so\n# subsequent training attempts load faster, relative path\ndataset_prepared_path: str | None\n# Num shards for whole dataset\ndataset_shard_num: int | None\n# Index of shard to use for whole dataset\ndataset_shard_idx: int | None\nskip_prepare_dataset: bool | None = False\n# Number of shards to save the prepared dataset\nnum_dataset_shards_to_save: int | None\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize\npretraining_dataset: Annotated[list[PretrainingDataset | SFTDataset], MinLen(1)] | None\n # For PretrainingDataset:\n name: str | None\n path: str | None\n split: str | None = train\n text_column: str | None = text\n type: str | None = pretrain\n trust_remote_code: bool | None = False\n data_files: str | None\n skip: int | None\n\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 
'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. 
The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. This parameter is ignored for local datasets.\n revision: str | None\n\n# The maximum number of processes to use while preprocessing your input dataset. This\n# defaults to `os.cpu_count()` if not set. For Runpod VMs, it will default to number of\n# vCPUs via RUNPOD_CPU_COUNT.\ndataset_processes: int | None\n# Deduplicates datasets and test_datasets with identical entries\ndataset_exact_deduplication: bool | None\n# Keep dataset in memory while preprocessing. Only needed if cached dataset is taking\n# too much storage\ndataset_keep_in_memory: bool | None\ndataloader_pin_memory: bool | None\ndataloader_num_workers: int | None\ndataloader_prefetch_factor: int | None\ndataloader_drop_last: bool | None\n\naccelerator_config: dict[str, Any] | None\n\nremove_unused_columns: bool | None\n\n# Push prepared dataset to hub - repo_org/repo_name\npush_dataset_to_hub: str | None\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private\n# datasets. Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: bool | None\n\ndevice: Any | None\n# Passed through to transformers when loading the model when launched without\n# accelerate. Use `sequential` when training w/ model parallelism to limit memory\ndevice_map: Any | None\nworld_size: int | None\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank: int | None\nddp: bool | None\n\n# Seed for reproducibility\nseed: int | None\n# Advanced DDP Arguments - timeout\nddp_timeout: int | None\n# Advanced DDP Arguments - bucket cap in MB\nddp_bucket_cap_mb: int | None\n# Advanced DDP Arguments - broadcast buffers\nddp_broadcast_buffers: bool | None\nddp_find_unused_parameters: bool | None\n\n# Approximate number of predictions sent to wandb depending on batch size. Enabled above\n# 0. Default is 0\neval_table_size: int | None\n# Total number of tokens generated for predictions sent to wandb. Default is 128\neval_max_new_tokens: int | None\n# Whether to run causal language model evaluation for metrics in\n# `eval_causal_lm_metrics`\ndo_causal_lm_eval: bool | None\n# HF evaluate metrics used during evaluation. 
Default is ['sacrebleu', 'comet', 'ter',\n# 'chrf', 'perplexity']\neval_causal_lm_metrics: list[str] | None\ndo_bench_eval: bool | None\nbench_dataset: str | None\nbench_split: str | None\nmetric_for_best_model: str | None\ngreater_is_better: bool | None\n\n# High loss value, indicating the learning has broken down (a good estimate is ~2 times\n# the loss at the start of training)\nloss_watchdog_threshold: float | None\n# Number of high-loss steps in a row before the trainer aborts (default: 3)\nloss_watchdog_patience: int | None\n\n# Run garbage collection every `gc_steps` steps. -1 will run on epoch end and before\n# evaluations. Default is 0 (disabled).\ngc_steps: int | None\n\n# Use CUDA bf16. bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection.\n# require >=ampere\nbf16: Literal['auto'] | bool | None = auto\n# Use CUDA fp16\nfp16: bool | None\n# Enable FP8 mixed precision training using TorchAO. Best used in combination with\n# torch.compile.\nfp8: bool | None\n# Enable FSDP float8 all-gather optimization for FP8 training. Can improve training\n# speed by 10-15% when FSDP is enabled.\nfp8_enable_fsdp_float8_all_gather: bool | None\n# No AMP (automatic mixed precision) - require >=ampere\nbfloat16: bool | None\n# No AMP (automatic mixed precision)\nfloat16: bool | None\n# Use CUDA tf32 - require >=ampere\ntf32: bool | None\nfloat32: bool | None\n\n# Whether to use gradient checkpointing. Available options are: true, false, 'offload',\n# 'offload_disk'.\n# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: Literal['offload', 'offload_disk'] | bool | None = False\n# Additional kwargs to pass to the trainer for gradient checkpointing\ngradient_checkpointing_kwargs: dict[str, Any] | None\n# Whether to offload activations. Available options are: true, false, 'legacy', 'disk'.\nactivation_offloading: Literal['legacy', 'disk'] | bool | None = False\n\nunfrozen_parameters: list[str] | None\n\n# The maximum length of an input to train with, this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: int = 512\n# The maximum length of an input for evaluation. If not specified, defaults to\n# sequence_len\neval_sequence_len: int | None\nmin_sample_len: int | None\n# maximum prompt length for RL training\nmax_prompt_len: int = 512\n# Use efficient multi-packing with block diagonal attention and per sequence\n# position_ids. Recommend set to 'true'\nsample_packing: bool | None\n# The number of samples packed at a time. Increasing the following values helps with\n# packing, but usually only slightly (<%1.)\nsample_packing_group_size: int | None = 100000\n# The number of samples which can be packed into one sequence. Increase if using a large\n# sequence_len with many short samples.\nsample_packing_bin_size: int | None = 200\n# Whether to pack samples sequentially\nsample_packing_sequentially: bool | None\n# The multiprocessing start method to use for packing. Should be 'fork', 'spawn' or\n# 'forkserver'\nsample_packing_mp_start_method: str | None\n# Set to 'false' if getting errors during eval with sample_packing on\neval_sample_packing: bool | None\n# Pad inputs so each step uses constant sized buffers. This will reduce memory\n# fragmentation and may prevent OOMs, by re-using memory more efficiently. 
Defaults to\n# True if `sample_packing` enabled\npad_to_sequence_len: bool | None\n# Whether to use sequential sampling for curriculum learning\ncurriculum_sampling: bool | None\nmultipack_real_batches: bool | None\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation: bool | None\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening: Literal['auto'] | bool | None\n\nuse_pose: bool | None\npose_split_on_token_ids: list[int] | None\npose_max_context_len: int | None\npose_num_chunks: int | None\n\npretrain_multipack_buffer_size: int | None = 10000\n# whether to prevent cross attention for packed sequences during pretraining\npretrain_multipack_attn: bool | None = True\n\n# Whether to use xformers attention patch https://github.com/facebookresearch/xformers\nxformers_attention: bool | None\n# Whether to use scaled-dot-product attention https://pytorch.org/docs/stable/generated/\n# torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention: bool | None\n# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention: bool | None\nflex_attention: bool | None\nflex_attn_compile_kwargs: dict[str, Any] | None\n# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention\nflash_attention: bool | None\n# Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_cross_entropy: bool | None\n# Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_rms_norm: bool | None\n# Whether to fuse QKV into a single operation\nflash_attn_fuse_qkv: bool | None\n# Whether to fuse part of the MLP into a single operation\nflash_attn_fuse_mlp: bool | None\n# Whether to use bettertransformers\nflash_optimum: bool | None\n\neager_attention: bool | None\n\nunsloth_cross_entropy_loss: bool | None\nunsloth_lora_mlp: bool | None\nunsloth_lora_qkv: bool | None\nunsloth_lora_o: bool | None\nunsloth_rms_norm: bool | None\nunsloth_rope: bool | None\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_mlp_kernel: bool | None\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_qkv_kernel: bool | None\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_o_kernel: bool | None\n\n# Whether to use chunked cross entropy loss for memory efficiency\nchunked_cross_entropy: bool | None\n# Number of chunks to use for chunked cross entropy loss\nchunked_cross_entropy_num_chunks: int | None\n\n# Whether to use ALST tiled mlp for memory efficient long context\ntiled_mlp: bool | None\n\n# Number of shards to use for ALST tiled mlp. If unset, it will be set based on\n# seqlen/hidden_size\ntiled_mlp_num_shards: int | None\n\n# Whether to use original mlp for ALST tiled mlp. Otherwise uses a generic MLP based on\n# llama.\ntiled_mlp_use_original_mlp: bool | None = True\n\nllama4_linearized_experts: bool | None\n\n# Deepspeed config path. 
e.g., deepspeed_configs/zero3.json\ndeepspeed: str | dict[str, Any] | None\n# Whether to use deepcompile for faster training with deepspeed\ndeepcompile: bool | None\n# FSDP configuration\nfsdp: list[str] | None\n\n# FSDP configuration options\nfsdp_config: dict[str, Any] | None\n# FSDP version\nfsdp_version: int | None\nfsdp_final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None\n\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for\n# no eval.\nval_set_size: float | None = 0.0\n\n# Number of devices to shard across. If not set, will use all available devices.\ndp_shard_size: int | None\n# Number of devices to replicate across.\ndp_replicate_size: int | None\n# Deprecated: use `context_parallel_size` instead\nsequence_parallel_degree: int | None\n# Set to a divisor of the number of GPUs available to split sequences into chunks of\n# equal size. Use in long context training to prevent OOM when sequences cannot fit into\n# a single GPU's VRAM. E.g., if 4 GPUs are available, set this value to 2 to split each\n# sequence into two equal-sized subsequences, or set to 4 to split into four equal-sized\n# subsequences. See https://docs.axolotl.ai/docs/sequence_parallelism.html for more\n# details.\ncontext_parallel_size: int | None\n# Optional; strides across the key dimension. Larger values use more memory but should\n# make training faster. Must evenly divide the number of KV heads in your model.\nheads_k_stride: int | None\n# One of 'varlen_llama3', 'batch_ring', 'batch_zigzag', 'batch_stripe'. Defaults to\n# 'varlen_llama3' in the sample packing case, and 'batch_ring' in the non-sample packing\n# case.\nring_attn_func: RingAttnFunc | None\n# Number of tensor parallel processes in TP group. Only supported with DeepSpeed AutoTP.\ntensor_parallel_size: int | None\n\n# Add or change special tokens. If you add tokens here, you don't need to add them to\n# the `tokens` list.\nspecial_tokens: SpecialTokensConfig | None\n # For SpecialTokensConfig:\n bos_token: str | None\n eos_token: str | None\n pad_token: str | None\n unk_token: str | None\n additional_special_tokens: list[str] | None\n\n# Add extra tokens to the tokenizer\ntokens: list[str] | None\n# Mapping token_id to new_token_string to override reserved added_tokens in the\n# tokenizer. Only works for tokens that are not part of the base vocab (aka are\n# added_tokens). Can be checked if they exist in tokenizer.json added_tokens.\nadded_tokens_overrides: dict[int, str] | None\n\n# Whether to use torch.compile and which backend to use. setting to `auto` will enable\n# torch compile when torch>=2.6.0\ntorch_compile: Literal['auto'] | bool | None\n# Backend to use for torch.compile\ntorch_compile_backend: str | None\ntorch_compile_mode: Literal['default', 'reduce-overhead', 'max-autotune'] | None\n\n# Maximum number of iterations to train for. It precedes num_epochs which means that if\n# both are set, num_epochs will not be guaranteed. e.g., when 1 epoch is 1000 steps =>\n# `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps: int | None\n# Number of warmup steps. Cannot use with warmup_ratio\nwarmup_steps: int | None\n# Warmup ratio. Cannot use with warmup_steps\nwarmup_ratio: float | None\n# Leave empty to eval at each epoch, integer for every N steps. 
float for fraction of\n# total steps\neval_steps: int | float | None\n# Number of times per epoch to run evals, mutually exclusive with eval_steps\nevals_per_epoch: int | None\n# Set to `no` to skip evaluation, `epoch` at end of each epoch, leave empty to infer\n# from `eval_steps`\neval_strategy: str | None\n\n# Leave empty to save at each epoch, integer for every N steps. float for fraction of\n# total steps\nsave_steps: int | float | None\n# Number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsaves_per_epoch: int | None\n# Set to `no` to skip checkpoint saves, `epoch` at end of each epoch, `best` when better\n# result is achieved, leave empty to infer from `save_steps`\nsave_strategy: str | None\n# Checkpoints saved at a time\nsave_total_limit: int | None\n# Whether to checkpoint a model after the first step of training. Defaults to False.\nsave_first_step: bool | None\n\n# Logging frequency\nlogging_steps: int | None\n# Stop training after this many evaluation losses have increased in a row. https://huggi\n# ngface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppin\n# gCallback\nearly_stopping_patience: int | None\nload_best_model_at_end: bool | None = False\n# Save only the model weights, skipping the optimizer. Using this means you can't resume\n# from checkpoints.\nsave_only_model: bool | None = False\n# Use tensorboard for logging\nuse_tensorboard: bool | None\n# Enable the pytorch profiler to capture the first N steps of training to the\n# output_dir. see https://pytorch.org/blog/understanding-gpu-memory-1/ for more\n# information. Snapshots can be visualized @ https://pytorch.org/memory_viz\nprofiler_steps: int | None\n# Which step to start the profiler at. Useful for only capturing a few steps mid-run.\nprofiler_steps_start: int | None = 0\n# bool of whether to include tokens trainer per second in the training metrics. This\n# iterates over the entire dataset once, so it takes some time.\ninclude_tokens_per_second: bool | None\n\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to\n# add noise to embeddings. Currently only supported on Llama and Mistral\nneftune_noise_alpha: float | None\n\n# Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to\n# `beta` in `ORPOConfig` due to trl mapping.\norpo_alpha: float | None\n# Weighting of NLL term in loss from RPO paper\nrpo_alpha: float | None\n# Target reward margin for the SimPO loss\nsimpo_gamma: float | None\n# Weight of the BC regularizer\ncpo_alpha: float | None\n\n# Factor for desirable loss term in KTO loss\nkto_desirable_weight: float | None\n# Factor for undesirable loss term in KTO loss\nkto_undesirable_weight: float | None\n# The beta parameter for the RL training\nrl_beta: float | None\n\n# Defines the max memory usage per gpu on the system. Passed through to transformers\n# when loading the model.\nmax_memory: dict[int | Literal['cpu', 'disk'], int | str] | None\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in\n# gigabytes); default: unset\ngpu_memory_limit: int | str | None\n# Whether to use low_cpu_mem_usage\nlow_cpu_mem_usage: bool | None\n\n# The name of the chat template to use for training, following values are supported:\n# tokenizer_default: Uses the chat template that is available in the\n# tokenizer_config.json. If the chat template is not available in the tokenizer, it will\n# raise an error. 
This is the default value.\n# alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n# are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n# tokenizer_default_fallback_*: where * is the name of the chat template to fallback to.\n# E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not\n# available in the tokenizer. jinja: Uses a custom jinja template for the chat template.\n# The custom jinja template should be provided in the chat_template_jinja field. The\n# selected chat template will be saved to the tokenizer_config.json for easier\n# inferencing\nchat_template: ChatTemplate | Annotated[str, StringConstraints(pattern='^tokenizer_default_fallback_')] | None\n# Custom jinja template or path to jinja file for chat template. This will be only used\n# if chat_template is set to `jinja` or `null` (in which case chat_template is\n# automatically set to `jinja`). Default is null.\nchat_template_jinja: str | None\n# Additional kwargs to pass to the chat template. This is useful for customizing the\n# chat template. For example, you can pass `thinking=False` to add a generation prompt\n# to the chat template.\nchat_template_kwargs: dict[str, Any] | None\n# Custom EOT (End-of-Turn) tokens to mask/unmask during training. These tokens mark the\n# boundaries between conversation turns. For example: ['/INST', '</s>',\n# '[/SYSTEM_PROMPT]']. If not specified, defaults to just the model's eos_token. This is\n# useful for templates that use multiple delimiter tokens.\neot_tokens: list[str] | None\n# Changes the default system message. Currently only supports chatml.\ndefault_system_message: str | None\n\nfix_untrained_tokens: int | list[int] | None\n\nis_preprocess: bool | None\npreprocess_iterable: bool | None\n\n# Total number of tokens - internal use\ntotal_num_tokens: int | None\ntotal_supervised_tokens: int | None\n# You can set these packing optimizations AFTER starting a training at least once. The\n# trainer will provide recommended values for these values.\nsample_packing_eff_est: float | None\naxolotl_config_path: str | None\n\n# Internal use only - Used to identify which the model is based on\nis_falcon_derived_model: bool | None\n# Internal use only - Used to identify which the model is based on\nis_llama_derived_model: bool | None\n# Internal use only - Used to identify which the model is based on. Please note that if\n# you set this to true, `padding_side` will be set to 'left' by default\nis_mistral_derived_model: bool | None\n# Internal use only - Used to identify which the model is based on\nis_qwen_derived_model: bool | None\n\n# Add plugins to extend the pipeline. See `src/axolotl/integrations` for the available\n# plugins or doc below for more details.\n# https://docs.axolotl.ai/docs/custom_integrations.html\nplugins: list[str] | None\n\n# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files. 
This\n# can also be a relative path to a model on disk\nbase_model: str (required)\n# If the base_model repo on hf hub doesn't include configuration .json files, You can\n# set that here, or leave this empty to default to base_model\nbase_model_config: str | None\ncls_model_config: str | None\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one defined in the base model\ntokenizer_config: str | None\n# use_fast option for tokenizer loading from_pretrained, default to True\ntokenizer_use_fast: bool | None\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy: bool | None\n# Whether to use mistral-common tokenizer. If set to True, it will use the mistral-\n# common tokenizer.\ntokenizer_use_mistral_common: bool | None\n# Corresponding tokenizer for the model AutoTokenizer is a good choice\ntokenizer_type: str | None\n# transformers processor class\nprocessor_type: str | None\n# Trust remote code for untrusted source\ntrust_remote_code: bool | None\n\n# Where to save the full-finetuned model to\noutput_dir: str = ./model-out\n# push checkpoints to hub\nhub_model_id: str | None\n# how to push checkpoints to hub\nhub_strategy: str | None\n# Save model as safetensors (require safetensors package). Default True\nsave_safetensors: bool | None = True\n\n# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer\nload_in_8bit: bool | None = False\n# Use bitsandbytes 4 bit\nload_in_4bit: bool | None = False\n\n# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in\n# original model\nadapter: str | None\n# If you already have a lora model trained that you want to load, put that here. This\n# means after training, if you want to test the model, you should set this to the value\n# of `output_dir`. Note that if you merge an adapter to the base model, a new\n# subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir: str | None\nlora_r: int | None\nlora_alpha: int | None\nlora_fan_in_fan_out: bool | None\nlora_target_modules: str | list[str] | None\nlora_target_parameters: str | list[str] | None\n# If true, will target all linear modules\nlora_target_linear: bool | None\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules\n# because they need to know the new tokens. For LLaMA and Mistral, you need to save\n# `embed_tokens` and `lm_head`. It may vary for other models. `embed_tokens` converts\n# tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\nlora_modules_to_save: list[str] | None\nlora_dropout: float | None = 0.0\n# The layer indices to transform, otherwise, apply to all layers\npeft_layers_to_transform: list[int] | None\npeft_layers_pattern: list[str] | None\n\npeft: PeftConfig | None\n # For PeftConfig:\n # Configuration options for loftq initialization for LoRA\n loftq_config: LoftQConfig | None\n # For LoftQConfig:\n # typically 4 bits\n loftq_bits: int = 4\n\n# Whether to use DoRA.\npeft_use_dora: bool | None\n# Whether to use RSLoRA.\npeft_use_rslora: bool | None\n# List of layer indices to replicate.\npeft_layer_replication: list[tuple[int, int]] | None\n# How to initialize LoRA weights. 
Default to True which is MS original implementation.\npeft_init_lora_weights: bool | str | None\n\n# load qlora model in sharded format for FSDP using answer.ai technique.\nqlora_sharded_model_loading: bool | None = False\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it\n# takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge\nlora_on_cpu: bool | None\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: bool | None\n# optional overrides to the bnb 4bit quantization configuration\nbnb_config_kwargs: dict[str, Any] | None\n\n# loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.\nloraplus_lr_ratio: float | None\n# loraplus learning rate for lora embedding layers. Default value is 1e-6.\nloraplus_lr_embedding: float | None = 1e-06\n\nmerge_lora: bool | None\n\n# Whether to use ReLoRA. Use with jagged_restart_*steps options.\nrelora: bool | None\n# threshold for optimizer magnitude when pruning\nrelora_prune_ratio: float | None\n# True to perform lora weight merges on cpu during restarts, for modest gpu memory\n# savings\nrelora_cpu_offload: bool | None\n\n# how often to reset for jagged restarts\njagged_restart_steps: int | None\n# how many warmup steps to take after reset for jagged restarts\njagged_restart_warmup_steps: int | None\n# how many anneal steps to take before reset for jagged restarts\njagged_restart_anneal_steps: int | None\n\n# If greater than 1, backpropagation will be skipped and the gradients will be\n# accumulated for the given number of steps.\ngradient_accumulation_steps: int | None = 1\n# The number of samples to include in each batch. This is the number of samples sent to\n# each GPU. Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: int | None = 1\n# Total batch size, we do not recommended setting this manually\nbatch_size: int | None\n# per gpu micro batch size for evals, defaults to value of micro_batch_size\neval_batch_size: int | None\n\n# whether to find batch size that fits in memory. Passed to underlying transformers\n# Trainer\nauto_find_batch_size: bool | None\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: bool | None = False\n# Group similarly sized data to minimize padding. May be slower to start, as it must\n# download and sort the entire dataset. Note that training loss may have an oscillating\n# pattern with this enabled.\ngroup_by_length: bool | None\n\nlearning_rate: str | float (required)\nembedding_lr: float | None\nembedding_lr_scale: float | None\n# Specify weight decay\nweight_decay: float | None = 0.0\n# Specify optimizer\noptimizer: OptimizerNames | CustomSupportedOptimizers | None = OptimizerNames.ADAMW_TORCH_FUSED\n# Dictionary of arguments to pass to the optimizer\noptim_args: str | dict[str, Any] | None\n# The target modules to optimize, i.e. the module names that you would like to train,\n# right now this is used only for GaLore algorithm\noptim_target_modules: list[str] | Literal['all_linear'] | None\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path: str | None\nlr_scheduler: SchedulerType | Literal['one_cycle'] | Literal['rex'] | None = SchedulerType.COSINE\n# Specify a scheduler and kwargs to use with the optimizer\nlr_scheduler_kwargs: dict[str, Any] | None\nlr_quadratic_warmup: bool | None\n# decay lr to some percentage of the peak lr, e.g. 
cosine_min_lr_ratio=0.1 for 10% of\n# peak lr\ncosine_min_lr_ratio: float | None\n# freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means\n# start cosine_min_lr at 80% of training step\ncosine_constant_lr_ratio: float | None\n# Learning rate div factor\nlr_div_factor: float | None\n\nlr_groups: list[LrGroup] | None\n # For LrGroup:\n name: str (required)\n modules: list[str] (required)\n lr: float (required)\n\n# adamw hyperparams\nadam_epsilon: float | None\n# only used for CAME Optimizer\nadam_epsilon2: float | None\n# adamw hyperparams\nadam_beta1: float | None\n# adamw hyperparams\nadam_beta2: float | None\n# only used for CAME Optimizer\nadam_beta3: float | None\n\n# Dion Optimizer learning rate\ndion_lr: float | None\n# Dion Optimizer momentum\ndion_momentum: float | None\n# Dion Optimizer: r/d fraction for low-rank approximation. Used to compute the low-rank\n# dimension.\ndion_rank_fraction: float | None = 1.0\n# Dion Optimizer: Round up the low-rank dimension to a multiple of this number. This may\n# be useful to ensure even sharding.\ndion_rank_multiple_of: int | None = 1\n\n# Gradient clipping max norm\nmax_grad_norm: float | None\nnum_epochs: float = 1.0\n\nuse_wandb: bool | None\n# Set the name of your wandb run\nwandb_name: str | None\n# Set the ID of your wandb run\nwandb_run_id: str | None\n# \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn\n# off wandb\nwandb_mode: str | None\n# Your wandb project name\nwandb_project: str | None\n# A wandb Team name if using a Team\nwandb_entity: str | None\nwandb_watch: str | None\n# \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only\n# at the end of training\nwandb_log_model: str | None\n\nuse_mlflow: bool | None\n# URI to mlflow\nmlflow_tracking_uri: str | None\n# Your experiment name\nmlflow_experiment_name: str | None\n# Your run name\nmlflow_run_name: str | None\n# set to true to copy each saved checkpoint on each save to mlflow artifact registry\nhf_mlflow_log_artifacts: bool | None\n\n# Enable or disable Comet integration.\nuse_comet: bool | None\n# API key for Comet. Recommended to set via `comet login`.\ncomet_api_key: str | None\n# Workspace name in Comet. Defaults to the user's default workspace.\ncomet_workspace: str | None\n# Project name in Comet. Defaults to Uncategorized.\ncomet_project_name: str | None\n# Identifier for the experiment. Used to append data to an existing experiment or\n# control the key of new experiments. Default to a random key.\ncomet_experiment_key: str | None\n# Create a new experiment (\"create\") or log to an existing one (\"get\"). Default\n# (\"get_or_create\") auto-selects based on configuration.\ncomet_mode: str | None\n# Set to True to log data to Comet server, or False for offline storage. 
Default is\n# True.\ncomet_online: bool | None\n# Dictionary for additional configuration settings, see the doc for more details.\ncomet_experiment_config: dict[str, Any] | None\n\n# the number of activate layers in LISA\nlisa_n_layers: int | None\n# how often to switch layers in LISA\nlisa_step_interval: int | None\n# path under the model to access the layers\nlisa_layers_attribute: str | None = model.layers\n\ngradio_title: str | None\ngradio_share: bool | None\ngradio_server_name: str | None\ngradio_server_port: int | None\ngradio_max_new_tokens: int | None\ngradio_temperature: float | None\n\nuse_ray: bool = False\nray_run_name: str | None\nray_num_workers: int = 1\nresources_per_worker: dict\n\n# The size of the image to resize to. It can be an integer (resized into padded-square\n# image) or a tuple (width, height).If not provided, we will attempt to load from\n# preprocessor.size, otherwise, images won't be resized.\nimage_size: int | tuple[int, int] | None\n# The resampling algorithm to use for image resizing. Default is bilinear. Please refer\n# to PIL.Image.Resampling for more details.\nimage_resize_algorithm: Literal['bilinear', 'bicubic', 'lanczos'] | Resampling | None\n\n# optional overrides to the base model configuration\noverrides_of_model_config: dict[str, Any] | None\n# optional overrides the base model loading from_pretrained\noverrides_of_model_kwargs: dict[str, Any] | None\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good\n# choice too\ntype_of_model: str | None\n# You can specify to choose a specific model revision from huggingface hub\nrevision_of_model: str | None\n\nmax_packed_sequence_len: int | None\nrope_scaling: Any | None\nnoisy_embedding_alpha: float | None\ndpo_beta: float | None\nevaluation_strategy: str | None", + "text": "# Allow overwrite yml config using from cli\nstrict: bool | None = False\n# Resume from a specific checkpoint dir\nresume_from_checkpoint: str | None\n# If resume_from_checkpoint isn't set and you simply want it to start where it left off.\n# Be careful with this being turned on between different models.\nauto_resume_from_checkpoints: bool | None\n# Resize the model embeddings when new tokens are added to multiples of 32. This is\n# reported to improve training speed on some models\nresize_token_embeddings_to_32x: bool | None\nmean_resizing_embeddings: bool | None = False\n\n# Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.\nshrink_embeddings: bool | None\n# Don't upcast the embeddings to float32 when using PEFT. Useful for low-VRAM GPUs\nembeddings_skip_upcast: bool | None\n\n# Use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo'\nrl: RLType | None\n\ntrl: TRLConfig | None\n # For TRLConfig:\n # Beta parameter for the RL training. Same as `rl_beta`. Use\n beta: float | None\n # Maximum length of the completion for RL training.\n max_completion_length: int | None\n\n # Whether to use VLLM for RL training.\n use_vllm: bool = False\n # VLLM mode to use, one of 'server' or 'colocate'\n vllm_mode: Literal['server', 'colocate'] | None\n # Host of the vLLM server to connect to.\n vllm_server_host: str | None = 0.0.0.0\n # Port of the vLLM server to connect to.\n vllm_server_port: int | None = 8000\n # Total timeout (in seconds) to wait for the vLLM server to respond.\n vllm_server_timeout: int | None\n # Regex for vLLM guided decoding.\n vllm_guided_decoding_regex: str | None\n\n # List of reward functions to load. 
Paths must be importable from current dir.\n reward_funcs: list[str] | None\n # List of reward weights for the reward functions.\n reward_weights: list[float] | None\n # Number of generations to sample.\n num_generations: int | None\n # Whether to log completions.\n log_completions: bool | None = False\n # Number of completions to print when log_completions is True.\n num_completions_to_print: int | None\n # Controls whether importance sampling ratios are computed at the `'token'` or\n # `'sequence'` level. For GSPO, use `sequence`, default is None which corresponds to\n # the original GRPO paper.\n importance_sampling_level: Literal['sequence', 'token'] | None\n\n # Whether to sync the reference model.\n sync_ref_model: bool | None = False\n # Mixup alpha for the reference model.\n ref_model_mixup_alpha: float | None = 0.9\n # Sync steps for the reference model.\n ref_model_sync_steps: int | None = 64\n # Whether to scale rewards by their standard deviation.\n scale_rewards: bool = True\n\n # Sampling temperature for the GRPO policy.\n temperature: float | None\n # Top-p sampling probability for the generation policy.\n top_p: float | None\n # Top-k sampling for the generation policy.\n top_k: int | None\n # Minimum probability for the generation policy.\n min_p: float | None\n # Penalty for tokens that appear in prompt and generated text.\n repetition_penalty: float | None\n # Number of iterations per batch (μ) for GRPO.\n num_iterations: int | None\n # Epsilon value for clipping in the GRPO algorithm.\n epsilon: float | None\n # Upper-bound epsilon value for clipping in the GRPO algorithm.\n epsilon_high: float | None\n # Whether to use Liger loss for GRPO.\n use_liger_loss: bool | None\n # Loss formulation to use. Supported values: grpo, bnpo, dr_grpo.\n loss_type: str | None\n # Whether to exclude truncated completions from loss calculation.\n mask_truncated_completions: bool = False\n\nvllm: VllmConfig | None\n # For VllmConfig:\n # Device to use for VLLM\n device: str | None = auto\n # Tensor parallel size for VLLM\n tensor_parallel_size: int | None\n # Data parallel size for VLLM\n data_parallel_size: int | None\n # GPU memory utilization for VLLM\n gpu_memory_utilization: float | None = 0.9\n # Data type for VLLM\n dtype: str | None = auto\n # Maximum length of the model context for VLLM\n max_model_len: int | None\n # Enable prefix caching for VLLM\n enable_prefix_caching: bool | None\n # Host for the vLLM server to start on\n host: str | None = 0.0.0.0\n # Port of the vLLM server to start on\n port: int | None = 8000\n\n # Enable reasoning for VLLM\n enable_reasoning: bool | None\n # Reasoning parser for VLLM\n reasoning_parser: str | None\n\nqat: QATConfig | None\n # For QATConfig:\n # Fake quantization layout to use for activation quantization. Valid options are\n # \"int4\" and \"int8\"\n activation_dtype: TorchIntDType | None\n # Fake quantization layout to use for weight quantization. Valid options are \"int4\"\n # and \"int8\"\n weight_dtype: TorchIntDType = TorchIntDType.int8\n # Quantize embedding\n quantize_embedding: bool | None = False\n # The number of elements in each group for per-group fake quantization\n group_size: int | None = 32\n # The number of steps to apply fake quantization after\n fake_quant_after_n_steps: int | None\n\nquantization: PTQConfig | None\n # For PTQConfig:\n # Fake quantization layout to use for weight quantization. 
Valid options are uintX for\n # X in [1, 2, 3, 4, 5, 6, 7], or int4, or int8\n weight_dtype: TorchIntDType = TorchIntDType.int8\n # Fake quantization layout to use for activation quantization. Valid options are\n # \"int4\" and \"int8\"\n activation_dtype: TorchIntDType | None\n # Whether to quantize the embedding layer.\n quantize_embedding: bool | None\n # The number of elements in each group for per-group fake quantization\n group_size: int | None = 32\n\n# Reward modelling: `True` or `False`\nreward_model: bool | None\n# Process reward modelling: `True` or `False`\nprocess_reward_model: bool | None\nnum_labels: int | None\n\n# Whether to perform weighting in DPO trainer\ndpo_use_weighting: bool | None\ndpo_use_logits_to_keep: bool | None\ndpo_label_smoothing: float | None\ndpo_norm_loss: bool | None\ndpo_padding_free: bool | None\ndpo_generate_during_eval: bool | None\n\n# A list of one or more datasets to finetune the model with\ndatasets: Annotated[list[SFTDataset | DPODataset | KTODataset | StepwiseSupervisedDataset], MinLen(1)] | None\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. 
Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. 
This parameter is ignored for local datasets.\n revision: str | None\n\n # For DPODataset:\n path: str | None\n split: str | None\n type: UserDefinedDPOType | str | None\n # For UserDefinedDPOType:\n field_system: str | None\n field_prompt: str | None\n field_chosen: str | None\n field_rejected: str | None\n prompt_format: str | None\n chosen_format: str | None\n rejected_format: str | None\n data_files: list[str] | None\n revision: str | None\n field_messages: str | None\n\n # For KTODataset:\n path: str | None\n split: str | None\n type: UserDefinedKTOType | str | None\n # For UserDefinedKTOType:\n field_system: str | None\n field_prompt: str | None\n field_completion: str | None\n field_label: bool | None\n prompt_format: str | None\n completion_format: str | None\n data_files: list[str] | None\n trust_remote_code: bool | None = False\n revision: str | None\n\n # For StepwiseSupervisedDataset:\n path: str | None\n split: str | None\n data_files: list[str] | None\n revision: str | None\n step_separator: str | None\n max_completion_length: int | None\n train_on_last_step_only: bool | None\n\n# A list of one or more datasets to eval the model with. You can use either\n# test_datasets, or val_set_size, but not both.\ntest_datasets: Annotated[list[SFTDataset | DPODataset | KTODataset | StepwiseSupervisedDataset], MinLen(1)] | None\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. 
Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. 
This parameter is ignored for local datasets.\n revision: str | None\n\n # For DPODataset:\n path: str | None\n split: str | None\n type: UserDefinedDPOType | str | None\n # For UserDefinedDPOType:\n field_system: str | None\n field_prompt: str | None\n field_chosen: str | None\n field_rejected: str | None\n prompt_format: str | None\n chosen_format: str | None\n rejected_format: str | None\n data_files: list[str] | None\n revision: str | None\n field_messages: str | None\n\n # For KTODataset:\n path: str | None\n split: str | None\n type: UserDefinedKTOType | str | None\n # For UserDefinedKTOType:\n field_system: str | None\n field_prompt: str | None\n field_completion: str | None\n field_label: bool | None\n prompt_format: str | None\n completion_format: str | None\n data_files: list[str] | None\n trust_remote_code: bool | None = False\n revision: str | None\n\n # For StepwiseSupervisedDataset:\n path: str | None\n split: str | None\n data_files: list[str] | None\n revision: str | None\n step_separator: str | None\n max_completion_length: int | None\n train_on_last_step_only: bool | None\n\n# If false, the datasets will not be shuffled and will keep their original order in\n# `datasets`. The same applies to the `test_datasets` option and the\n# `pretraining_dataset` option. Default is true.\nshuffle_merged_datasets: bool | None = True\n# If true, each dataset in `datasets` will be shuffled before merging. This allows\n# curriculum learning strategies to be applied at the dataset level. Default is false.\nshuffle_before_merging_datasets: bool | None = False\n# Axolotl attempts to save the dataset as an arrow after packing the data together so\n# subsequent training attempts load faster, relative path\ndataset_prepared_path: str | None\n# Num shards for whole dataset\ndataset_shard_num: int | None\n# Index of shard to use for whole dataset\ndataset_shard_idx: int | None\nskip_prepare_dataset: bool | None = False\n# Number of shards to save the prepared dataset\nnum_dataset_shards_to_save: int | None\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize\npretraining_dataset: Annotated[list[PretrainingDataset | SFTDataset], MinLen(1)] | None\n # For PretrainingDataset:\n name: str | None\n path: str | None\n split: str | None = train\n text_column: str | None = text\n type: str | None = pretrain\n trust_remote_code: bool | None = False\n data_files: str | None\n skip: int | None\n\n # For SFTDataset:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n path: str | None\n # name of dataset split to load from\n split: str | None\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: str | UserDefinedPrompterType | None\n # For UserDefinedPrompterType:\n # Custom user instruction prompt\n system_prompt: str | None\n # Use {system} as key to be replaced\n system_format: str | None\n field_system: str | None\n field_instruction: str | None\n field_input: str | None\n field_output: str | None\n\n # Customizable to be single line or multi-line. Use {instruction}/{input} as key to\n # be replaced. 
'format' can include {input}\n format: str | None\n # 'no_input_format' cannot include {input}\n no_input_format: str | None\n input_transform: str | None\n # split dataset into N pieces (use with shards_idx)\n shards: int | None\n # the index of sharded dataset to use\n shards_idx: int | None\n # process dataset in N sequential chunks for memory efficiency (exclusive with\n # `shards`)\n preprocess_shards: int | None\n conversation: str | None\n\n # The name of the chat template to use for training, following values are supported:\n # tokenizer_default: Uses the chat template that is available in the\n # tokenizer_config.json. If the chat template is not available in the tokenizer, it\n # will raise an error. This is the default.\n # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n # tokenizer_default_fallback_*: where * is the name of the chat template to fallback\n # to if the tokenizer does not have a chat template else default to tokenizer. E.g.\n # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat\n # template. The custom jinja template should be provided in the chat_template_jinja\n # field.\n chat_template: ChatTemplate | str | None\n # Custom jinja chat template or path to jinja file. Used only if `chat_template:\n # jinja` or empty.\n chat_template_jinja: str | None\n # path to source data files\n data_files: str | list[str] | None\n input_format: str | None\n # name of dataset configuration to load\n name: str | None\n # defines the datatype when path is a file\n ds_type: str | None\n # For `completion` datasets only, uses the provided field instead of `text` column\n field: str | None\n field_human: str | None\n field_model: str | None\n # Key containing the messages (default: \"messages\")\n field_messages: str | None\n # Key containing the tools (default: \"tools\"). Must be a list[dict] and follow [JSON\n # schema](https://json-schema.org/learn/getting-started-step-by-step).\n field_tools: str | None\n\n message_field_role: str | None\n\n message_field_content: str | None\n # Mapping of properties from the input dataset to the chat template. (default:\n # message_property_mappings={'role':'role', 'content':'content'}) If a property exists\n # in the template but not in this mapping, the system will attempt to load it directly\n # from the message using the property name as the key. Example: In the mapping below,\n # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and\n # used as 'content' in the chat template.\n message_property_mappings: dict[str, str] | None\n # The key in the message turn that indicates via boolean whether tokens of a turn\n # should be considered for training. Useful to selectively train on certain turns\n # besides the `roles_to_train`.\n message_field_training: str | None\n # The key in the message turn that contains the training details. Useful to\n # selectively train on certain tokens in a turn. The value of the key is a List[Dict]\n # containing `begin_offset` (start character index in content), `end_offset` (end\n # character index in content), and `train` (boolean whether to train).\n message_field_training_detail: str | None\n # (for Qwen3 template only) Whether to split the assistant content based on a\n # reasoning trace inside delimited tags\n split_thinking: bool | None\n logprobs_field: str | None\n temperature: float | None\n # Roles to train on. 
The tokens from these roles will be considered for the loss.\n roles_to_train: list[str] | None\n # Which EOS tokens to train on in the conversation. Possible values are: all: train on\n # all EOS tokens, turn (default): train on the EOS token at the end of each trainable\n # turn, last: train on the last EOS token in the conversation\n train_on_eos: Literal['all', 'turn', 'last'] | None\n # Roles mapping in the messages. The format is {target_role: [source_roles]}. All\n # source roles will be mapped to the target role. The default is: user: [\"human\",\n # \"user\"], assistant: [\"gpt\", \"assistant\"], system: [\"system\"], tool: [\"tool\"]\n roles: dict[str, list[str]] | None\n # Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If\n # you wish to, we recommend using a custom jinja template with the default system\n # message removed or adding a system turn with empty content.\n drop_system_message: bool | None\n # Trust remote code for untrusted source\n trust_remote_code: bool | None = False\n # The specific revision of the dataset to use when loading from the Hugging Face Hub.\n # This can be a commit hash, tag, or branch name. If not specified, the latest version\n # will be used. This parameter is ignored for local datasets.\n revision: str | None\n\n# The maximum number of processes to use while preprocessing your input dataset. This\n# defaults to `os.cpu_count()` if not set. For Runpod VMs, it will default to number of\n# vCPUs via RUNPOD_CPU_COUNT.\ndataset_processes: int | None\n# Deduplicates datasets and test_datasets with identical entries\ndataset_exact_deduplication: bool | None\n# Keep dataset in memory while preprocessing. Only needed if cached dataset is taking\n# too much storage\ndataset_keep_in_memory: bool | None\ndataloader_pin_memory: bool | None\ndataloader_num_workers: int | None\ndataloader_prefetch_factor: int | None\ndataloader_drop_last: bool | None\n\naccelerator_config: dict[str, Any] | None\n\nremove_unused_columns: bool | None\n\n# Push prepared dataset to hub - repo_org/repo_name\npush_dataset_to_hub: str | None\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private\n# datasets. Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: bool | None\n\ndevice: Any | None\n# Passed through to transformers when loading the model when launched without\n# accelerate. Use `sequential` when training w/ model parallelism to limit memory\ndevice_map: Any | None\nworld_size: int | None\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank: int | None\nddp: bool | None\n\n# Seed for reproducibility\nseed: int | None\n# Advanced DDP Arguments - timeout\nddp_timeout: int | None\n# Advanced DDP Arguments - bucket cap in MB\nddp_bucket_cap_mb: int | None\n# Advanced DDP Arguments - broadcast buffers\nddp_broadcast_buffers: bool | None\nddp_find_unused_parameters: bool | None\n\n# Approximate number of predictions sent to wandb depending on batch size. Enabled above\n# 0. Default is 0\neval_table_size: int | None\n# Total number of tokens generated for predictions sent to wandb. Default is 128\neval_max_new_tokens: int | None\n# Whether to run causal language model evaluation for metrics in\n# `eval_causal_lm_metrics`\ndo_causal_lm_eval: bool | None\n# HF evaluate metrics used during evaluation. 
Default is ['sacrebleu', 'comet', 'ter',\n# 'chrf', 'perplexity']\neval_causal_lm_metrics: list[str] | None\ndo_bench_eval: bool | None\nbench_dataset: str | None\nbench_split: str | None\nmetric_for_best_model: str | None\ngreater_is_better: bool | None\n\n# High loss value, indicating the learning has broken down (a good estimate is ~2 times\n# the loss at the start of training)\nloss_watchdog_threshold: float | None\n# Number of high-loss steps in a row before the trainer aborts (default: 3)\nloss_watchdog_patience: int | None\n\n# Run garbage collection every `gc_steps` steps. -1 will run on epoch end and before\n# evaluations. Default is 0 (disabled).\ngc_steps: int | None\n\n# Use CUDA bf16. bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection.\n# require >=ampere\nbf16: Literal['auto'] | bool | None = auto\n# Use CUDA fp16\nfp16: bool | None\n# Enable FP8 mixed precision training using TorchAO. Best used in combination with\n# torch.compile.\nfp8: bool | None\n# Enable FSDP float8 all-gather optimization for FP8 training. Can improve training\n# speed by 10-15% when FSDP is enabled.\nfp8_enable_fsdp_float8_all_gather: bool | None\n# No AMP (automatic mixed precision) - require >=ampere\nbfloat16: bool | None\n# No AMP (automatic mixed precision)\nfloat16: bool | None\n# Use CUDA tf32 - require >=ampere\ntf32: bool | None\nfloat32: bool | None\n\n# Whether to use gradient checkpointing. Available options are: true, false, 'offload',\n# 'offload_disk'.\n# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: Literal['offload', 'offload_disk'] | bool | None = False\n# Additional kwargs to pass to the trainer for gradient checkpointing\ngradient_checkpointing_kwargs: dict[str, Any] | None\n# Whether to offload activations. Available options are: true, false, 'legacy', 'disk'.\nactivation_offloading: Literal['legacy', 'disk'] | bool | None = False\n\nunfrozen_parameters: list[str] | None\n\n# The maximum length of an input to train with, this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: int = 512\n# The maximum length of an input for evaluation. If not specified, defaults to\n# sequence_len\neval_sequence_len: int | None\nmin_sample_len: int | None\n# maximum prompt length for RL training\nmax_prompt_len: int = 512\n# Use efficient multi-packing with block diagonal attention and per sequence\n# position_ids. Recommend set to 'true'\nsample_packing: bool | None\n# The number of samples packed at a time. Increasing the following values helps with\n# packing, but usually only slightly (<%1.)\nsample_packing_group_size: int | None = 100000\n# The number of samples which can be packed into one sequence. Increase if using a large\n# sequence_len with many short samples.\nsample_packing_bin_size: int | None = 200\n# Whether to pack samples sequentially\nsample_packing_sequentially: bool | None\n# The multiprocessing start method to use for packing. Should be 'fork', 'spawn' or\n# 'forkserver'\nsample_packing_mp_start_method: str | None\n# Set to 'false' if getting errors during eval with sample_packing on\neval_sample_packing: bool | None\n# Pad inputs so each step uses constant sized buffers. This will reduce memory\n# fragmentation and may prevent OOMs, by re-using memory more efficiently. 
Defaults to\n# True if `sample_packing` enabled\npad_to_sequence_len: bool | None\n# Whether to use sequential sampling for curriculum learning\ncurriculum_sampling: bool | None\nmultipack_real_batches: bool | None\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation: bool | None\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening: Literal['auto'] | bool | None\n\nuse_pose: bool | None\npose_split_on_token_ids: list[int] | None\npose_max_context_len: int | None\npose_num_chunks: int | None\n\npretrain_multipack_buffer_size: int | None = 10000\n# whether to prevent cross attention for packed sequences during pretraining\npretrain_multipack_attn: bool | None = True\n\n# Whether to use xformers attention patch https://github.com/facebookresearch/xformers\nxformers_attention: bool | None\n# Whether to use scaled-dot-product attention https://pytorch.org/docs/stable/generated/\n# torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention: bool | None\n# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention: bool | None\nflex_attention: bool | None\nflex_attn_compile_kwargs: dict[str, Any] | None\n# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention\nflash_attention: bool | None\n# Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_cross_entropy: bool | None\n# Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_rms_norm: bool | None\n# Whether to fuse QKV into a single operation\nflash_attn_fuse_qkv: bool | None\n# Whether to fuse part of the MLP into a single operation\nflash_attn_fuse_mlp: bool | None\n# Whether to use bettertransformers\nflash_optimum: bool | None\n\neager_attention: bool | None\n\nunsloth_cross_entropy_loss: bool | None\nunsloth_lora_mlp: bool | None\nunsloth_lora_qkv: bool | None\nunsloth_lora_o: bool | None\nunsloth_rms_norm: bool | None\nunsloth_rope: bool | None\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_mlp_kernel: bool | None\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_qkv_kernel: bool | None\n# Apply custom LoRA autograd functions and activation function Triton kernels for speed\n# and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_o_kernel: bool | None\n\n# Whether to use chunked cross entropy loss for memory efficiency\nchunked_cross_entropy: bool | None\n# Number of chunks to use for chunked cross entropy loss\nchunked_cross_entropy_num_chunks: int | None\n\n# Whether to use ALST tiled mlp for memory efficient long context\ntiled_mlp: bool | None\n\n# Number of shards to use for ALST tiled mlp. If unset, it will be set based on\n# seqlen/hidden_size\ntiled_mlp_num_shards: int | None\n\n# Whether to use original mlp for ALST tiled mlp. Otherwise uses a generic MLP based on\n# llama.\ntiled_mlp_use_original_mlp: bool | None = True\n\nllama4_linearized_experts: bool | None\n\n# Deepspeed config path. 
e.g., deepspeed_configs/zero3.json\ndeepspeed: str | dict[str, Any] | None\n# Whether to use deepcompile for faster training with deepspeed\ndeepcompile: bool | None\n# FSDP configuration\nfsdp: list[str] | None\n\n# FSDP configuration options\nfsdp_config: dict[str, Any] | None\n# FSDP version\nfsdp_version: int | None\nfsdp_final_state_dict_type: Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None\n\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for\n# no eval.\nval_set_size: float | None = 0.0\n\n# Number of devices to shard across. If not set, will use all available devices.\ndp_shard_size: int | None\n# Number of devices to replicate across.\ndp_replicate_size: int | None\n# Deprecated: use `context_parallel_size` instead\nsequence_parallel_degree: int | None\n# Set to a divisor of the number of GPUs available to split sequences into chunks of\n# equal size. Use in long context training to prevent OOM when sequences cannot fit into\n# a single GPU's VRAM. E.g., if 4 GPUs are available, set this value to 2 to split each\n# sequence into two equal-sized subsequences, or set to 4 to split into four equal-sized\n# subsequences. See https://docs.axolotl.ai/docs/sequence_parallelism.html for more\n# details.\ncontext_parallel_size: int | None\n# Optional; strides across the key dimension. Larger values use more memory but should\n# make training faster. Must evenly divide the number of KV heads in your model.\nheads_k_stride: int | None\n# One of 'varlen_llama3', 'batch_ring', 'batch_zigzag', 'batch_stripe'. Defaults to\n# 'varlen_llama3' in the sample packing case, and 'batch_ring' in the non-sample packing\n# case.\nring_attn_func: RingAttnFunc | None\n# Number of tensor parallel processes in TP group. Only supported with DeepSpeed AutoTP.\ntensor_parallel_size: int | None\n\n# Add or change special tokens. If you add tokens here, you don't need to add them to\n# the `tokens` list.\nspecial_tokens: SpecialTokensConfig | None\n # For SpecialTokensConfig:\n bos_token: str | None\n eos_token: str | None\n pad_token: str | None\n unk_token: str | None\n additional_special_tokens: list[str] | None\n\n# Add extra tokens to the tokenizer\ntokens: list[str] | None\n# Mapping token_id to new_token_string to override reserved added_tokens in the\n# tokenizer. Only works for tokens that are not part of the base vocab (aka are\n# added_tokens). Can be checked if they exist in tokenizer.json added_tokens.\nadded_tokens_overrides: dict[int, str] | None\n\n# Whether to use torch.compile and which backend to use. setting to `auto` will enable\n# torch compile when torch>=2.6.0\ntorch_compile: Literal['auto'] | bool | None\n# Backend to use for torch.compile\ntorch_compile_backend: str | None\ntorch_compile_mode: Literal['default', 'reduce-overhead', 'max-autotune'] | None\n\n# Maximum number of iterations to train for. It precedes num_epochs which means that if\n# both are set, num_epochs will not be guaranteed. e.g., when 1 epoch is 1000 steps =>\n# `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps: int | None\n# Number of warmup steps. Cannot use with warmup_ratio\nwarmup_steps: int | None\n# Warmup ratio. Cannot use with warmup_steps\nwarmup_ratio: float | None\n# Leave empty to eval at each epoch, integer for every N steps. 
float for fraction of\n# total steps\neval_steps: int | float | None\n# Number of times per epoch to run evals, mutually exclusive with eval_steps\nevals_per_epoch: int | None\n# Set to `no` to skip evaluation, `epoch` at end of each epoch, leave empty to infer\n# from `eval_steps`\neval_strategy: str | None\n\n# Leave empty to save at each epoch, integer for every N steps. float for fraction of\n# total steps\nsave_steps: int | float | None\n# Number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsaves_per_epoch: int | None\n# Set to `no` to skip checkpoint saves, `epoch` at end of each epoch, `best` when better\n# result is achieved, leave empty to infer from `save_steps`\nsave_strategy: str | None\n# Maximum number of checkpoints to keep\nsave_total_limit: int | None\n# Whether to checkpoint a model after the first step of training. Defaults to False.\nsave_first_step: bool | None\n\n# Logging frequency\nlogging_steps: int | None\n# Stop training after this many evaluation losses have increased in a row. https://huggi\n# ngface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppin\n# gCallback\nearly_stopping_patience: int | None\nload_best_model_at_end: bool | None = False\n# Save only the model weights, skipping the optimizer. Using this means you can't resume\n# from checkpoints.\nsave_only_model: bool | None = False\n# Use tensorboard for logging\nuse_tensorboard: bool | None\n# Enable the pytorch profiler to capture the first N steps of training to the\n# output_dir. See https://pytorch.org/blog/understanding-gpu-memory-1/ for more\n# information. Snapshots can be visualized @ https://pytorch.org/memory_viz\nprofiler_steps: int | None\n# Which step to start the profiler at. Useful for only capturing a few steps mid-run.\nprofiler_steps_start: int | None = 0\n# Whether to include tokens per second in the training metrics. This\n# iterates over the entire dataset once, so it takes some time.\ninclude_tokens_per_second: bool | None\n\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to\n# add noise to embeddings. Currently only supported on Llama and Mistral\nneftune_noise_alpha: float | None\n\n# Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to\n# `beta` in `ORPOConfig` due to trl mapping.\norpo_alpha: float | None\n# Weighting of NLL term in loss from RPO paper\nrpo_alpha: float | None\n# Target reward margin for the SimPO loss\nsimpo_gamma: float | None\n# Weight of the BC regularizer\ncpo_alpha: float | None\n\n# Factor for desirable loss term in KTO loss\nkto_desirable_weight: float | None\n# Factor for undesirable loss term in KTO loss\nkto_undesirable_weight: float | None\n# The beta parameter for RL training\nrl_beta: float | None\n\n# Defines the max memory usage per gpu on the system. Passed through to transformers\n# when loading the model.\nmax_memory: dict[int | Literal['cpu', 'disk'], int | str] | None\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in\n# gigabytes); default: unset\ngpu_memory_limit: int | str | None\n# Whether to use low_cpu_mem_usage\nlow_cpu_mem_usage: bool | None\n\n# The name of the chat template to use for training, the following values are supported:\n# tokenizer_default: Uses the chat template that is available in the\n# tokenizer_config.json. If the chat template is not available in the tokenizer, it will\n# raise an error. 
This is the default value.\n# alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates\n# are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.\n# tokenizer_default_fallback_*: where * is the name of the chat template to fallback to.\n# E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not\n# available in the tokenizer. jinja: Uses a custom jinja template for the chat template.\n# The custom jinja template should be provided in the chat_template_jinja field. The\n# selected chat template will be saved to the tokenizer_config.json for easier\n# inferencing\nchat_template: ChatTemplate | Annotated[str, StringConstraints(pattern='^tokenizer_default_fallback_')] | None\n# Custom jinja template or path to jinja file for chat template. This will be only used\n# if chat_template is set to `jinja` or `null` (in which case chat_template is\n# automatically set to `jinja`). Default is null.\nchat_template_jinja: str | None\n# Additional kwargs to pass to the chat template. This is useful for customizing the\n# chat template. For example, you can pass `thinking=False` to add a generation prompt\n# to the chat template.\nchat_template_kwargs: dict[str, Any] | None\n# Custom EOT (End-of-Turn) tokens to mask/unmask during training. These tokens mark the\n# boundaries between conversation turns. For example: ['/INST', '</s>',\n# '[/SYSTEM_PROMPT]']. If not specified, defaults to just the model's eos_token. This is\n# useful for templates that use multiple delimiter tokens.\neot_tokens: list[str] | None\n# Changes the default system message. Currently only supports chatml.\ndefault_system_message: str | None\n\nfix_untrained_tokens: int | list[int] | None\n\nis_preprocess: bool | None\npreprocess_iterable: bool | None\n\n# Total number of tokens - internal use\ntotal_num_tokens: int | None\ntotal_supervised_tokens: int | None\n# You can set these packing optimizations AFTER starting a training at least once. The\n# trainer will provide recommended values for these values.\nsample_packing_eff_est: float | None\naxolotl_config_path: str | None\n\n# Internal use only - Used to identify which the model is based on\nis_falcon_derived_model: bool | None\n# Internal use only - Used to identify which the model is based on\nis_llama_derived_model: bool | None\n# Internal use only - Used to identify which the model is based on. Please note that if\n# you set this to true, `padding_side` will be set to 'left' by default\nis_mistral_derived_model: bool | None\n# Internal use only - Used to identify which the model is based on\nis_qwen_derived_model: bool | None\n\n# Add plugins to extend the pipeline. See `src/axolotl/integrations` for the available\n# plugins or doc below for more details.\n# https://docs.axolotl.ai/docs/custom_integrations.html\nplugins: list[str] | None\n\n# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files. 
This\n# can also be a relative path to a model on disk\nbase_model: str (required)\n# If the base_model repo on hf hub doesn't include configuration .json files, You can\n# set that here, or leave this empty to default to base_model\nbase_model_config: str | None\ncls_model_config: str | None\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one defined in the base model\ntokenizer_config: str | None\n# use_fast option for tokenizer loading from_pretrained, default to True\ntokenizer_use_fast: bool | None\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy: bool | None\n# Whether to use mistral-common tokenizer. If set to True, it will use the mistral-\n# common tokenizer.\ntokenizer_use_mistral_common: bool | None\n# Corresponding tokenizer for the model AutoTokenizer is a good choice\ntokenizer_type: str | None\n# transformers processor class\nprocessor_type: str | None\n# Trust remote code for untrusted source\ntrust_remote_code: bool | None\n\n# Don't move the model to the device before sharding. This is an experimental feature\n# that may be included in the future as the default.\nexperimental_skip_move_to_device: bool | None\n\n# Where to save the full-finetuned model to\noutput_dir: str = ./model-out\n# push checkpoints to hub\nhub_model_id: str | None\n# how to push checkpoints to hub\nhub_strategy: str | None\n# Save model as safetensors (require safetensors package). Default True\nsave_safetensors: bool | None = True\n\n# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer\nload_in_8bit: bool | None = False\n# Use bitsandbytes 4 bit\nload_in_4bit: bool | None = False\n\n# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in\n# original model\nadapter: str | None\n# If you already have a lora model trained that you want to load, put that here. This\n# means after training, if you want to test the model, you should set this to the value\n# of `output_dir`. Note that if you merge an adapter to the base model, a new\n# subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir: str | None\nlora_r: int | None\nlora_alpha: int | None\nlora_fan_in_fan_out: bool | None\nlora_target_modules: str | list[str] | None\nlora_target_parameters: str | list[str] | None\n# If true, will target all linear modules\nlora_target_linear: bool | None\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules\n# because they need to know the new tokens. For LLaMA and Mistral, you need to save\n# `embed_tokens` and `lm_head`. It may vary for other models. `embed_tokens` converts\n# tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\nlora_modules_to_save: list[str] | None\nlora_dropout: float | None = 0.0\n# The layer indices to transform, otherwise, apply to all layers\npeft_layers_to_transform: list[int] | None\npeft_layers_pattern: list[str] | None\n\npeft: PeftConfig | None\n # For PeftConfig:\n # Configuration options for loftq initialization for LoRA\n loftq_config: LoftQConfig | None\n # For LoftQConfig:\n # typically 4 bits\n loftq_bits: int = 4\n\n# Whether to use DoRA.\npeft_use_dora: bool | None\n# Whether to use RSLoRA.\npeft_use_rslora: bool | None\n# List of layer indices to replicate.\npeft_layer_replication: list[tuple[int, int]] | None\n# How to initialize LoRA weights. 
Defaults to True, which is the MS original implementation.\npeft_init_lora_weights: bool | str | None\n\n# load qlora model in sharded format for FSDP using answer.ai technique.\nqlora_sharded_model_loading: bool | None = False\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it\n# takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge\nlora_on_cpu: bool | None\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: bool | None\n# optional overrides to the bnb 4bit quantization configuration\nbnb_config_kwargs: dict[str, Any] | None\n\n# loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.\nloraplus_lr_ratio: float | None\n# loraplus learning rate for lora embedding layers. Default value is 1e-6.\nloraplus_lr_embedding: float | None = 1e-06\n\nmerge_lora: bool | None\n\n# Whether to use ReLoRA. Use with jagged_restart_*steps options.\nrelora: bool | None\n# threshold for optimizer magnitude when pruning\nrelora_prune_ratio: float | None\n# True to perform lora weight merges on cpu during restarts, for modest gpu memory\n# savings\nrelora_cpu_offload: bool | None\n\n# how often to reset for jagged restarts\njagged_restart_steps: int | None\n# how many warmup steps to take after reset for jagged restarts\njagged_restart_warmup_steps: int | None\n# how many anneal steps to take before reset for jagged restarts\njagged_restart_anneal_steps: int | None\n\n# If greater than 1, the optimizer step will be skipped and the gradients will be\n# accumulated for the given number of steps.\ngradient_accumulation_steps: int | None = 1\n# The number of samples to include in each batch. This is the number of samples sent to\n# each GPU. Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: int | None = 1\n# Total batch size, we do not recommend setting this manually\nbatch_size: int | None\n# per gpu micro batch size for evals, defaults to the value of micro_batch_size\neval_batch_size: int | None\n\n# whether to find a batch size that fits in memory. Passed to underlying transformers\n# Trainer\nauto_find_batch_size: bool | None\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: bool | None = False\n# Group similarly sized data to minimize padding. May be slower to start, as it must\n# download and sort the entire dataset. Note that training loss may have an oscillating\n# pattern with this enabled.\ngroup_by_length: bool | None\n\nlearning_rate: str | float (required)\nembedding_lr: float | None\nembedding_lr_scale: float | None\n# Specify weight decay\nweight_decay: float | None = 0.0\n# Specify optimizer\noptimizer: OptimizerNames | CustomSupportedOptimizers | None = OptimizerNames.ADAMW_TORCH_FUSED\n# Dictionary of arguments to pass to the optimizer\noptim_args: str | dict[str, Any] | None\n# The target modules to optimize, i.e. the module names that you would like to train,\n# right now this is used only for GaLore algorithm\noptim_target_modules: list[str] | Literal['all_linear'] | None\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path: str | None\nlr_scheduler: SchedulerType | Literal['one_cycle'] | Literal['rex'] | None = SchedulerType.COSINE\n# Specify a scheduler and kwargs to use with the optimizer\nlr_scheduler_kwargs: dict[str, Any] | None\nlr_quadratic_warmup: bool | None\n# decay lr to some percentage of the peak lr, e.g. 
cosine_min_lr_ratio=0.1 for 10% of\n# peak lr\ncosine_min_lr_ratio: float | None\n# freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means\n# start cosine_min_lr at 80% of training step\ncosine_constant_lr_ratio: float | None\n# Learning rate div factor\nlr_div_factor: float | None\n\nlr_groups: list[LrGroup] | None\n # For LrGroup:\n name: str (required)\n modules: list[str] (required)\n lr: float (required)\n\n# adamw hyperparams\nadam_epsilon: float | None\n# only used for CAME Optimizer\nadam_epsilon2: float | None\n# adamw hyperparams\nadam_beta1: float | None\n# adamw hyperparams\nadam_beta2: float | None\n# only used for CAME Optimizer\nadam_beta3: float | None\n\n# Dion Optimizer learning rate\ndion_lr: float | None\n# Dion Optimizer momentum\ndion_momentum: float | None\n# Dion Optimizer: r/d fraction for low-rank approximation. Used to compute the low-rank\n# dimension.\ndion_rank_fraction: float | None = 1.0\n# Dion Optimizer: Round up the low-rank dimension to a multiple of this number. This may\n# be useful to ensure even sharding.\ndion_rank_multiple_of: int | None = 1\n\n# Gradient clipping max norm\nmax_grad_norm: float | None\nnum_epochs: float = 1.0\n\nuse_wandb: bool | None\n# Set the name of your wandb run\nwandb_name: str | None\n# Set the ID of your wandb run\nwandb_run_id: str | None\n# \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn\n# off wandb\nwandb_mode: str | None\n# Your wandb project name\nwandb_project: str | None\n# A wandb Team name if using a Team\nwandb_entity: str | None\nwandb_watch: str | None\n# \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only\n# at the end of training\nwandb_log_model: str | None\n\nuse_mlflow: bool | None\n# URI to mlflow\nmlflow_tracking_uri: str | None\n# Your experiment name\nmlflow_experiment_name: str | None\n# Your run name\nmlflow_run_name: str | None\n# set to true to copy each saved checkpoint on each save to mlflow artifact registry\nhf_mlflow_log_artifacts: bool | None\n\n# Enable or disable Comet integration.\nuse_comet: bool | None\n# API key for Comet. Recommended to set via `comet login`.\ncomet_api_key: str | None\n# Workspace name in Comet. Defaults to the user's default workspace.\ncomet_workspace: str | None\n# Project name in Comet. Defaults to Uncategorized.\ncomet_project_name: str | None\n# Identifier for the experiment. Used to append data to an existing experiment or\n# control the key of new experiments. Default to a random key.\ncomet_experiment_key: str | None\n# Create a new experiment (\"create\") or log to an existing one (\"get\"). Default\n# (\"get_or_create\") auto-selects based on configuration.\ncomet_mode: str | None\n# Set to True to log data to Comet server, or False for offline storage. 
Default is\n# True.\ncomet_online: bool | None\n# Dictionary for additional configuration settings, see the doc for more details.\ncomet_experiment_config: dict[str, Any] | None\n\n# the number of activate layers in LISA\nlisa_n_layers: int | None\n# how often to switch layers in LISA\nlisa_step_interval: int | None\n# path under the model to access the layers\nlisa_layers_attribute: str | None = model.layers\n\ngradio_title: str | None\ngradio_share: bool | None\ngradio_server_name: str | None\ngradio_server_port: int | None\ngradio_max_new_tokens: int | None\ngradio_temperature: float | None\n\nuse_ray: bool = False\nray_run_name: str | None\nray_num_workers: int = 1\nresources_per_worker: dict\n\n# The size of the image to resize to. It can be an integer (resized into padded-square\n# image) or a tuple (width, height).If not provided, we will attempt to load from\n# preprocessor.size, otherwise, images won't be resized.\nimage_size: int | tuple[int, int] | None\n# The resampling algorithm to use for image resizing. Default is bilinear. Please refer\n# to PIL.Image.Resampling for more details.\nimage_resize_algorithm: Literal['bilinear', 'bicubic', 'lanczos'] | Resampling | None\n\n# optional overrides to the base model configuration\noverrides_of_model_config: dict[str, Any] | None\n# optional overrides the base model loading from_pretrained\noverrides_of_model_kwargs: dict[str, Any] | None\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good\n# choice too\ntype_of_model: str | None\n# You can specify to choose a specific model revision from huggingface hub\nrevision_of_model: str | None\n\nmax_packed_sequence_len: int | None\nrope_scaling: Any | None\nnoisy_embedding_alpha: float | None\ndpo_beta: float | None\nevaluation_strategy: str | None", "crumbs": [ "Getting Started", "Config Reference" diff --git a/sitemap.xml b/sitemap.xml index f5cbec854..892c37fdd 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -2,794 +2,794 @@ https://docs.axolotl.ai/TODO.html - 2025-08-05T20:29:19.431Z + 2025-08-06T04:13:21.021Z https://docs.axolotl.ai/index.html - 2025-08-05T20:29:19.452Z + 2025-08-06T04:13:21.049Z https://docs.axolotl.ai/docs/debugging.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.024Z https://docs.axolotl.ai/docs/amd_hpc.html - 2025-08-05T20:29:19.432Z + 2025-08-06T04:13:21.023Z https://docs.axolotl.ai/docs/api/utils.callbacks.mlflow_.html - 2025-08-05T20:32:35.089Z + 2025-08-06T04:16:42.482Z https://docs.axolotl.ai/docs/api/monkeypatch.llama_expand_mask.html - 2025-08-05T20:32:34.503Z + 2025-08-06T04:16:41.902Z https://docs.axolotl.ai/docs/api/loaders.patch_manager.html - 2025-08-05T20:32:34.107Z + 2025-08-06T04:16:41.508Z https://docs.axolotl.ai/docs/api/core.chat.format.llama3x.html - 2025-08-05T20:32:33.783Z + 2025-08-06T04:16:41.187Z https://docs.axolotl.ai/docs/api/cli.train.html - 2025-08-05T20:32:33.841Z + 2025-08-06T04:16:41.246Z https://docs.axolotl.ai/docs/api/utils.callbacks.perplexity.html - 2025-08-05T20:32:35.080Z + 2025-08-06T04:16:42.474Z https://docs.axolotl.ai/docs/api/core.chat.messages.html - 2025-08-05T20:32:33.780Z + 2025-08-06T04:16:41.184Z https://docs.axolotl.ai/docs/api/utils.callbacks.lisa.html - 2025-08-05T20:32:35.085Z + 2025-08-06T04:16:42.479Z https://docs.axolotl.ai/docs/api/cli.merge_sharded_fsdp_weights.html - 2025-08-05T20:32:33.937Z + 2025-08-06T04:16:41.341Z https://docs.axolotl.ai/docs/api/monkeypatch.mixtral.html - 2025-08-05T20:32:34.563Z + 2025-08-06T04:16:41.962Z 
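For orientation, a minimal sketch of how a handful of the options documented in the config reference above might be combined in a single Axolotl YAML config. The field names come from the reference; the base model path and all values are illustrative assumptions rather than recommended defaults.

base_model: ./path/to/base-model   # placeholder: a HF hub repo id or a local path
output_dir: ./model-out
learning_rate: 2.0e-5
num_epochs: 1.0
micro_batch_size: 1
gradient_accumulation_steps: 4     # per-GPU batch = micro_batch_size * gradient_accumulation_steps
val_set_size: 0.05                 # hold out 5% of the dataset for evaluation
evals_per_epoch: 4                 # mutually exclusive with eval_steps
saves_per_epoch: 1                 # mutually exclusive with save_steps
save_total_limit: 2                # keep at most two checkpoints
logging_steps: 10
warmup_ratio: 0.03                 # cannot be combined with warmup_steps
chat_template: chatml              # one of the built-in chat templates listed above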
https://docs.axolotl.ai/docs/api/utils.chat_templates.html - 2025-08-05T20:32:34.601Z + 2025-08-06T04:16:41.999Z https://docs.axolotl.ai/docs/api/core.chat.format.shared.html - 2025-08-05T20:32:33.785Z + 2025-08-06T04:16:41.188Z https://docs.axolotl.ai/docs/api/core.trainers.mixins.optimizer.html - 2025-08-05T20:32:34.114Z + 2025-08-06T04:16:41.515Z https://docs.axolotl.ai/docs/api/utils.collators.mamba.html - 2025-08-05T20:32:35.028Z + 2025-08-06T04:16:42.422Z https://docs.axolotl.ai/docs/api/logging_config.html - 2025-08-05T20:32:33.728Z + 2025-08-06T04:16:41.133Z https://docs.axolotl.ai/docs/api/utils.collators.mm_chat.html - 2025-08-05T20:32:35.033Z + 2025-08-06T04:16:42.427Z https://docs.axolotl.ai/docs/api/prompt_strategies.completion.html - 2025-08-05T20:32:34.237Z + 2025-08-06T04:16:41.637Z https://docs.axolotl.ai/docs/api/kernels.utils.html - 2025-08-05T20:32:34.455Z + 2025-08-06T04:16:41.854Z https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.chat_template.html - 2025-08-05T20:32:34.271Z + 2025-08-06T04:16:41.671Z https://docs.axolotl.ai/docs/api/kernels.swiglu.html - 2025-08-05T20:32:34.446Z + 2025-08-06T04:16:41.845Z https://docs.axolotl.ai/docs/api/common.const.html - 2025-08-05T20:32:34.988Z + 2025-08-06T04:16:42.382Z https://docs.axolotl.ai/docs/api/cli.cloud.base.html - 2025-08-05T20:32:33.960Z + 2025-08-06T04:16:41.364Z https://docs.axolotl.ai/docs/api/prompt_strategies.orpo.chat_template.html - 2025-08-05T20:32:34.335Z + 2025-08-06T04:16:41.734Z https://docs.axolotl.ai/docs/api/core.builders.rl.html - 2025-08-05T20:32:33.744Z + 2025-08-06T04:16:41.148Z https://docs.axolotl.ai/docs/api/utils.dict.html - 2025-08-05T20:32:34.695Z + 2025-08-06T04:16:42.092Z https://docs.axolotl.ai/docs/api/utils.schemas.integrations.html - 2025-08-05T20:32:34.809Z + 2025-08-06T04:16:42.204Z https://docs.axolotl.ai/docs/api/core.trainers.utils.html - 2025-08-05T20:32:34.072Z + 2025-08-06T04:16:41.473Z https://docs.axolotl.ai/docs/api/monkeypatch.trainer_fsdp_optim.html - 2025-08-05T20:32:34.552Z + 2025-08-06T04:16:41.951Z https://docs.axolotl.ai/docs/api/cli.evaluate.html - 2025-08-05T20:32:33.849Z + 2025-08-06T04:16:41.254Z https://docs.axolotl.ai/docs/api/core.builders.causal.html - 2025-08-05T20:32:33.739Z + 2025-08-06T04:16:41.144Z https://docs.axolotl.ai/docs/api/monkeypatch.multipack.html - 2025-08-05T20:32:34.498Z + 2025-08-06T04:16:41.897Z https://docs.axolotl.ai/docs/api/monkeypatch.llama_patch_multipack.html - 2025-08-05T20:32:34.543Z + 2025-08-06T04:16:41.942Z https://docs.axolotl.ai/docs/api/cli.delinearize_llama4.html - 2025-08-05T20:32:33.902Z + 2025-08-06T04:16:41.306Z https://docs.axolotl.ai/docs/api/utils.schemas.trl.html - 2025-08-05T20:32:34.791Z + 2025-08-06T04:16:42.186Z https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.zephyr.html - 2025-08-05T20:32:34.293Z + 2025-08-06T04:16:41.693Z https://docs.axolotl.ai/docs/api/integrations.kd.trainer.html - 2025-08-05T20:32:34.975Z + 2025-08-06T04:16:42.369Z https://docs.axolotl.ai/docs/api/monkeypatch.gradient_checkpointing.offload_disk.html - 2025-08-05T20:32:34.593Z + 2025-08-06T04:16:41.991Z https://docs.axolotl.ai/docs/api/utils.optimizers.adopt.html - 2025-08-05T20:32:34.703Z + 2025-08-06T04:16:42.099Z https://docs.axolotl.ai/docs/api/monkeypatch.data.batch_dataset_fetcher.html - 2025-08-05T20:32:34.562Z + 2025-08-06T04:16:41.960Z https://docs.axolotl.ai/docs/api/cli.cloud.modal_.html - 2025-08-05T20:32:33.967Z + 2025-08-06T04:16:41.370Z https://docs.axolotl.ai/docs/api/prompt_strategies.alpaca_chat.html - 
2025-08-05T20:32:34.197Z + 2025-08-06T04:16:41.597Z https://docs.axolotl.ai/docs/api/utils.freeze.html - 2025-08-05T20:32:34.623Z + 2025-08-06T04:16:42.021Z https://docs.axolotl.ai/docs/api/prompt_strategies.bradley_terry.llama3.html - 2025-08-05T20:32:34.339Z + 2025-08-06T04:16:41.738Z https://docs.axolotl.ai/docs/api/integrations.base.html - 2025-08-05T20:32:34.963Z + 2025-08-06T04:16:42.357Z https://docs.axolotl.ai/docs/api/monkeypatch.unsloth_.html - 2025-08-05T20:32:34.560Z + 2025-08-06T04:16:41.959Z https://docs.axolotl.ai/docs/api/prompt_strategies.kto.chatml.html - 2025-08-05T20:32:34.313Z + 2025-08-06T04:16:41.712Z https://docs.axolotl.ai/docs/api/cli.main.html - 2025-08-05T20:32:33.832Z + 2025-08-06T04:16:41.237Z https://docs.axolotl.ai/docs/api/common.datasets.html - 2025-08-05T20:32:35.003Z + 2025-08-06T04:16:42.397Z https://docs.axolotl.ai/docs/api/train.html - 2025-08-05T20:32:33.641Z + 2025-08-06T04:16:41.046Z https://docs.axolotl.ai/docs/api/utils.trainer.html - 2025-08-05T20:32:34.640Z + 2025-08-06T04:16:42.039Z https://docs.axolotl.ai/docs/api/prompt_strategies.llama2_chat.html - 2025-08-05T20:32:34.231Z + 2025-08-06T04:16:41.631Z https://docs.axolotl.ai/docs/api/index.html - 2025-08-05T20:32:33.578Z + 2025-08-06T04:16:40.984Z https://docs.axolotl.ai/docs/api/prompt_strategies.chat_template.html - 2025-08-05T20:32:34.183Z + 2025-08-06T04:16:41.584Z https://docs.axolotl.ai/docs/api/core.training_args.html - 2025-08-05T20:32:33.757Z + 2025-08-06T04:16:41.161Z https://docs.axolotl.ai/docs/api/kernels.quantize.html - 2025-08-05T20:32:34.453Z + 2025-08-06T04:16:41.852Z https://docs.axolotl.ai/docs/api/convert.html - 2025-08-05T20:32:33.676Z + 2025-08-06T04:16:41.081Z https://docs.axolotl.ai/docs/api/integrations.grokfast.optimizer.html - 2025-08-05T20:32:34.967Z + 2025-08-06T04:16:42.361Z https://docs.axolotl.ai/docs/api/prompt_strategies.stepwise_supervised.html - 2025-08-05T20:32:34.248Z + 2025-08-06T04:16:41.647Z https://docs.axolotl.ai/docs/api/utils.schemas.model.html - 2025-08-05T20:32:34.754Z + 2025-08-06T04:16:42.150Z https://docs.axolotl.ai/docs/api/utils.callbacks.qat.html - 2025-08-05T20:32:35.099Z + 2025-08-06T04:16:42.493Z https://docs.axolotl.ai/docs/api/loaders.constants.html - 2025-08-05T20:32:34.108Z + 2025-08-06T04:16:41.510Z https://docs.axolotl.ai/docs/api/cli.utils.sweeps.html - 2025-08-05T20:32:33.997Z + 2025-08-06T04:16:41.400Z https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.llama3.html - 2025-08-05T20:32:34.282Z + 2025-08-06T04:16:41.681Z https://docs.axolotl.ai/docs/api/core.datasets.transforms.chat_builder.html - 2025-08-05T20:32:33.798Z + 2025-08-06T04:16:41.202Z https://docs.axolotl.ai/docs/api/cli.utils.fetch.html - 2025-08-05T20:32:33.985Z + 2025-08-06T04:16:41.388Z https://docs.axolotl.ai/docs/api/core.trainers.mamba.html - 2025-08-05T20:32:34.040Z + 2025-08-06T04:16:41.442Z https://docs.axolotl.ai/docs/api/utils.schemas.enums.html - 2025-08-05T20:32:34.819Z + 2025-08-06T04:16:42.214Z https://docs.axolotl.ai/docs/api/utils.callbacks.profiler.html - 2025-08-05T20:32:35.084Z + 2025-08-06T04:16:42.477Z https://docs.axolotl.ai/docs/api/prompt_strategies.metharme.html - 2025-08-05T20:32:34.255Z + 2025-08-06T04:16:41.654Z https://docs.axolotl.ai/docs/api/core.trainers.trl.html - 2025-08-05T20:32:34.034Z + 2025-08-06T04:16:41.436Z https://docs.axolotl.ai/docs/api/prompt_strategies.orcamini.html - 2025-08-05T20:32:34.258Z + 2025-08-06T04:16:41.658Z https://docs.axolotl.ai/docs/api/utils.samplers.multipack.html - 2025-08-05T20:32:35.074Z + 
2025-08-06T04:16:42.467Z https://docs.axolotl.ai/docs/api/utils.schedulers.html - 2025-08-05T20:32:34.669Z + 2025-08-06T04:16:42.066Z https://docs.axolotl.ai/docs/api/core.trainers.grpo.trainer.html - 2025-08-05T20:32:34.058Z + 2025-08-06T04:16:41.459Z https://docs.axolotl.ai/docs/api/prompt_tokenizers.html - 2025-08-05T20:32:33.718Z + 2025-08-06T04:16:41.123Z https://docs.axolotl.ai/docs/config-reference.html - 2025-08-05T20:32:48.025Z + 2025-08-06T04:16:55.970Z https://docs.axolotl.ai/docs/multimodal.html - 2025-08-05T20:29:19.436Z + 2025-08-06T04:13:21.028Z https://docs.axolotl.ai/docs/mixed_precision.html - 2025-08-05T20:29:19.436Z + 2025-08-06T04:13:21.028Z https://docs.axolotl.ai/docs/unsloth.html - 2025-08-05T20:29:19.437Z + 2025-08-06T04:13:21.029Z https://docs.axolotl.ai/docs/ray-integration.html - 2025-08-05T20:29:19.437Z + 2025-08-06T04:13:21.029Z https://docs.axolotl.ai/docs/dataset-formats/stepwise_supervised.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.024Z https://docs.axolotl.ai/docs/dataset-formats/template_free.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.024Z https://docs.axolotl.ai/docs/dataset-formats/index.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.024Z https://docs.axolotl.ai/docs/dataset-formats/pretraining.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.024Z https://docs.axolotl.ai/docs/nd_parallelism.html - 2025-08-05T20:29:19.437Z + 2025-08-06T04:13:21.029Z https://docs.axolotl.ai/docs/sequence_parallelism.html - 2025-08-05T20:29:19.437Z + 2025-08-06T04:13:21.029Z https://docs.axolotl.ai/docs/inference.html - 2025-08-05T20:29:19.436Z + 2025-08-06T04:13:21.028Z https://docs.axolotl.ai/docs/fsdp_qlora.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.024Z https://docs.axolotl.ai/docs/multi-node.html - 2025-08-05T20:29:19.436Z + 2025-08-06T04:13:21.028Z https://docs.axolotl.ai/docs/lora_optims.html - 2025-08-05T20:29:19.436Z + 2025-08-06T04:13:21.028Z https://docs.axolotl.ai/docs/getting-started.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.024Z https://docs.axolotl.ai/docs/dataset_loading.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.024Z https://docs.axolotl.ai/docs/lr_groups.html - 2025-08-05T20:29:19.436Z + 2025-08-06T04:13:21.028Z https://docs.axolotl.ai/docs/input_output.html - 2025-08-05T20:29:19.436Z + 2025-08-06T04:13:21.028Z https://docs.axolotl.ai/src/axolotl/integrations/LICENSE.html - 2025-08-05T20:29:19.456Z + 2025-08-06T04:13:21.054Z https://docs.axolotl.ai/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html - 2025-08-05T20:29:19.457Z + 2025-08-06T04:13:21.054Z https://docs.axolotl.ai/docs/mac.html - 2025-08-05T20:29:19.436Z + 2025-08-06T04:13:21.028Z https://docs.axolotl.ai/docs/optimizers.html - 2025-08-05T20:29:19.437Z + 2025-08-06T04:13:21.029Z https://docs.axolotl.ai/docs/gradient_checkpointing.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.024Z https://docs.axolotl.ai/docs/qat.html - 2025-08-05T20:29:19.437Z + 2025-08-06T04:13:21.029Z https://docs.axolotl.ai/docs/faq.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.024Z https://docs.axolotl.ai/docs/dataset_preprocessing.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.024Z https://docs.axolotl.ai/docs/nccl.html - 2025-08-05T20:29:19.437Z + 2025-08-06T04:13:21.028Z https://docs.axolotl.ai/docs/cli.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.023Z https://docs.axolotl.ai/docs/torchao.html - 2025-08-05T20:29:19.437Z + 2025-08-06T04:13:21.029Z https://docs.axolotl.ai/docs/multi-gpu.html - 
2025-08-05T20:29:19.436Z + 2025-08-06T04:13:21.028Z https://docs.axolotl.ai/docs/rlhf.html - 2025-08-05T20:29:19.437Z + 2025-08-06T04:13:21.029Z https://docs.axolotl.ai/docs/dataset-formats/tokenized.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.024Z https://docs.axolotl.ai/docs/dataset-formats/conversation.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.023Z https://docs.axolotl.ai/docs/dataset-formats/inst_tune.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.024Z https://docs.axolotl.ai/docs/reward_modelling.html - 2025-08-05T20:29:19.437Z + 2025-08-06T04:13:21.029Z https://docs.axolotl.ai/docs/docker.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.024Z https://docs.axolotl.ai/docs/installation.html - 2025-08-05T20:29:19.436Z + 2025-08-06T04:13:21.028Z https://docs.axolotl.ai/docs/quantize.html - 2025-08-05T20:29:19.437Z + 2025-08-06T04:13:21.029Z https://docs.axolotl.ai/docs/custom_integrations.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.023Z https://docs.axolotl.ai/docs/batch_vs_grad.html - 2025-08-05T20:29:19.433Z + 2025-08-06T04:13:21.023Z https://docs.axolotl.ai/docs/api/cli.utils.train.html - 2025-08-05T20:32:34.008Z + 2025-08-06T04:16:41.411Z https://docs.axolotl.ai/docs/api/cli.art.html - 2025-08-05T20:32:33.872Z + 2025-08-06T04:16:41.277Z https://docs.axolotl.ai/docs/api/core.trainers.grpo.sampler.html - 2025-08-05T20:32:34.070Z + 2025-08-06T04:16:41.472Z https://docs.axolotl.ai/docs/api/loaders.model.html - 2025-08-05T20:32:34.082Z + 2025-08-06T04:16:41.483Z https://docs.axolotl.ai/docs/api/cli.preprocess.html - 2025-08-05T20:32:33.945Z + 2025-08-06T04:16:41.349Z https://docs.axolotl.ai/docs/api/cli.utils.html - 2025-08-05T20:32:33.968Z + 2025-08-06T04:16:41.372Z https://docs.axolotl.ai/docs/api/cli.inference.html - 2025-08-05T20:32:33.916Z + 2025-08-06T04:16:41.320Z https://docs.axolotl.ai/docs/api/monkeypatch.btlm_attn_hijack_flash.html - 2025-08-05T20:32:34.542Z + 2025-08-06T04:16:41.940Z https://docs.axolotl.ai/docs/api/datasets.html - 2025-08-05T20:32:33.662Z + 2025-08-06T04:16:41.068Z https://docs.axolotl.ai/docs/api/monkeypatch.transformers_fa_utils.html - 2025-08-05T20:32:34.559Z + 2025-08-06T04:16:41.957Z https://docs.axolotl.ai/docs/api/monkeypatch.llama_attn_hijack_flash.html - 2025-08-05T20:32:34.481Z + 2025-08-06T04:16:41.880Z https://docs.axolotl.ai/docs/api/monkeypatch.relora.html - 2025-08-05T20:32:34.502Z + 2025-08-06T04:16:41.901Z https://docs.axolotl.ai/docs/api/monkeypatch.stablelm_attn_hijack_flash.html - 2025-08-05T20:32:34.549Z + 2025-08-06T04:16:41.948Z https://docs.axolotl.ai/docs/api/loaders.adapter.html - 2025-08-05T20:32:34.097Z + 2025-08-06T04:16:41.498Z https://docs.axolotl.ai/docs/api/core.trainers.dpo.trainer.html - 2025-08-05T20:32:34.047Z + 2025-08-06T04:16:41.449Z https://docs.axolotl.ai/docs/api/integrations.cut_cross_entropy.args.html - 2025-08-05T20:32:34.966Z + 2025-08-06T04:16:42.360Z https://docs.axolotl.ai/docs/api/monkeypatch.utils.html - 2025-08-05T20:32:34.540Z + 2025-08-06T04:16:41.939Z https://docs.axolotl.ai/docs/api/loaders.processor.html - 2025-08-05T20:32:34.091Z + 2025-08-06T04:16:41.493Z https://docs.axolotl.ai/docs/api/cli.config.html - 2025-08-05T20:32:33.897Z + 2025-08-06T04:16:41.301Z https://docs.axolotl.ai/docs/api/integrations.liger.args.html - 2025-08-05T20:32:34.978Z + 2025-08-06T04:16:42.372Z https://docs.axolotl.ai/docs/api/loaders.tokenizer.html - 2025-08-05T20:32:34.090Z + 2025-08-06T04:16:41.491Z https://docs.axolotl.ai/docs/api/utils.schemas.config.html - 
2025-08-05T20:32:34.747Z + 2025-08-06T04:16:42.143Z https://docs.axolotl.ai/docs/api/utils.ctx_managers.sequence_parallel.html - 2025-08-05T20:32:34.148Z + 2025-08-06T04:16:41.549Z https://docs.axolotl.ai/docs/api/core.trainers.mixins.scheduler.html - 2025-08-05T20:32:34.124Z + 2025-08-06T04:16:41.526Z https://docs.axolotl.ai/docs/api/core.trainers.base.html - 2025-08-05T20:32:34.019Z + 2025-08-06T04:16:41.421Z https://docs.axolotl.ai/docs/api/cli.utils.args.html - 2025-08-05T20:32:33.980Z + 2025-08-06T04:16:41.383Z https://docs.axolotl.ai/docs/api/prompt_strategies.messages.chat.html - 2025-08-05T20:32:34.270Z + 2025-08-06T04:16:41.669Z https://docs.axolotl.ai/docs/api/monkeypatch.lora_kernels.html - 2025-08-05T20:32:34.532Z + 2025-08-06T04:16:41.931Z https://docs.axolotl.ai/docs/api/kernels.lora.html - 2025-08-05T20:32:34.424Z + 2025-08-06T04:16:41.824Z https://docs.axolotl.ai/docs/api/cli.vllm_serve.html - 2025-08-05T20:32:33.957Z + 2025-08-06T04:16:41.361Z https://docs.axolotl.ai/docs/api/utils.schemas.multimodal.html - 2025-08-05T20:32:34.796Z + 2025-08-06T04:16:42.192Z https://docs.axolotl.ai/docs/api/utils.schemas.utils.html - 2025-08-05T20:32:34.825Z + 2025-08-06T04:16:42.220Z https://docs.axolotl.ai/docs/api/monkeypatch.llama_attn_hijack_xformers.html - 2025-08-05T20:32:34.482Z + 2025-08-06T04:16:41.881Z https://docs.axolotl.ai/docs/api/integrations.lm_eval.args.html - 2025-08-05T20:32:34.982Z + 2025-08-06T04:16:42.376Z https://docs.axolotl.ai/docs/api/monkeypatch.mistral_attn_hijack_flash.html - 2025-08-05T20:32:34.497Z + 2025-08-06T04:16:41.896Z https://docs.axolotl.ai/docs/api/utils.collators.core.html - 2025-08-05T20:32:35.006Z + 2025-08-06T04:16:42.399Z https://docs.axolotl.ai/docs/api/core.chat.format.chatml.html - 2025-08-05T20:32:33.782Z + 2025-08-06T04:16:41.185Z https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.passthrough.html - 2025-08-05T20:32:34.296Z + 2025-08-06T04:16:41.696Z https://docs.axolotl.ai/docs/api/core.datasets.chat.html - 2025-08-05T20:32:33.790Z + 2025-08-06T04:16:41.193Z https://docs.axolotl.ai/docs/api/utils.bench.html - 2025-08-05T20:32:34.615Z + 2025-08-06T04:16:42.013Z https://docs.axolotl.ai/docs/api/utils.schemas.training.html - 2025-08-05T20:32:34.761Z + 2025-08-06T04:16:42.157Z https://docs.axolotl.ai/docs/api/utils.collators.batching.html - 2025-08-05T20:32:35.025Z + 2025-08-06T04:16:42.418Z https://docs.axolotl.ai/docs/api/prompt_strategies.input_output.html - 2025-08-05T20:32:34.243Z + 2025-08-06T04:16:41.643Z https://docs.axolotl.ai/docs/api/utils.lora.html - 2025-08-05T20:32:34.606Z + 2025-08-06T04:16:42.004Z https://docs.axolotl.ai/docs/api/prompt_strategies.base.html - 2025-08-05T20:32:34.149Z + 2025-08-06T04:16:41.551Z https://docs.axolotl.ai/docs/api/prompt_strategies.alpaca_w_system.html - 2025-08-05T20:32:34.210Z + 2025-08-06T04:16:41.611Z https://docs.axolotl.ai/docs/api/utils.schemas.datasets.html - 2025-08-05T20:32:34.779Z + 2025-08-06T04:16:42.174Z https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.user_defined.html - 2025-08-05T20:32:34.295Z + 2025-08-06T04:16:41.694Z https://docs.axolotl.ai/docs/api/utils.schemas.peft.html - 2025-08-05T20:32:34.788Z + 2025-08-06T04:16:42.183Z https://docs.axolotl.ai/docs/api/prompt_strategies.pygmalion.html - 2025-08-05T20:32:34.265Z + 2025-08-06T04:16:41.665Z https://docs.axolotl.ai/docs/api/common.architectures.html - 2025-08-05T20:32:34.987Z + 2025-08-06T04:16:42.380Z https://docs.axolotl.ai/docs/api/monkeypatch.gradient_checkpointing.offload_cpu.html - 2025-08-05T20:32:34.567Z + 
2025-08-06T04:16:41.965Z https://docs.axolotl.ai/docs/api/utils.callbacks.comet_.html - 2025-08-05T20:32:35.092Z + 2025-08-06T04:16:42.486Z https://docs.axolotl.ai/docs/api/integrations.spectrum.args.html - 2025-08-05T20:32:34.985Z + 2025-08-06T04:16:42.379Z https://docs.axolotl.ai/docs/api/cli.quantize.html - 2025-08-05T20:32:33.950Z + 2025-08-06T04:16:41.354Z https://docs.axolotl.ai/docs/api/cli.checks.html - 2025-08-05T20:32:33.879Z + 2025-08-06T04:16:41.283Z https://docs.axolotl.ai/docs/api/prompt_strategies.kto.llama3.html - 2025-08-05T20:32:34.305Z + 2025-08-06T04:16:41.704Z https://docs.axolotl.ai/docs/api/utils.model_shard_quant.html - 2025-08-05T20:32:34.612Z + 2025-08-06T04:16:42.010Z https://docs.axolotl.ai/docs/api/utils.quantization.html - 2025-08-05T20:32:34.732Z + 2025-08-06T04:16:42.129Z https://docs.axolotl.ai/docs/api/core.trainers.mixins.rng_state_loader.html - 2025-08-05T20:32:34.117Z + 2025-08-06T04:16:41.519Z https://docs.axolotl.ai/docs/api/kernels.geglu.html - 2025-08-05T20:32:34.435Z + 2025-08-06T04:16:41.834Z https://docs.axolotl.ai/docs/api/utils.data.pretraining.html - 2025-08-05T20:32:34.704Z + 2025-08-06T04:16:42.101Z https://docs.axolotl.ai/docs/api/prompt_strategies.kto.user_defined.html - 2025-08-05T20:32:34.314Z + 2025-08-06T04:16:41.714Z https://docs.axolotl.ai/docs/api/core.builders.base.html - 2025-08-05T20:32:33.734Z + 2025-08-06T04:16:41.139Z https://docs.axolotl.ai/docs/api/cli.merge_lora.html - 2025-08-05T20:32:33.925Z + 2025-08-06T04:16:41.329Z https://docs.axolotl.ai/docs/api/cli.utils.load.html - 2025-08-05T20:32:33.991Z + 2025-08-06T04:16:41.394Z https://docs.axolotl.ai/docs/api/utils.data.sft.html - 2025-08-05T20:32:34.711Z + 2025-08-06T04:16:42.108Z https://docs.axolotl.ai/docs/api/prompt_strategies.user_defined.html - 2025-08-05T20:32:34.218Z + 2025-08-06T04:16:41.619Z https://docs.axolotl.ai/docs/api/utils.tokenization.html - 2025-08-05T20:32:34.600Z + 2025-08-06T04:16:41.998Z https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.chatml.html - 2025-08-05T20:32:34.292Z + 2025-08-06T04:16:41.691Z https://docs.axolotl.ai/docs/api/models.mamba.modeling_mamba.html - 2025-08-05T20:32:35.004Z + 2025-08-06T04:16:42.398Z https://docs.axolotl.ai/docs/api/cli.args.html - 2025-08-05T20:32:33.869Z + 2025-08-06T04:16:41.273Z https://docs.axolotl.ai/docs/api/evaluate.html - 2025-08-05T20:32:33.651Z + 2025-08-06T04:16:41.057Z https://docs.axolotl.ai/docs/api/prompt_strategies.alpaca_instruct.html - 2025-08-05T20:32:34.198Z + 2025-08-06T04:16:41.599Z https://docs.axolotl.ai/docs/api/utils.distributed.html - 2025-08-05T20:32:34.689Z + 2025-08-06T04:16:42.086Z https://docs.axolotl.ai/docs/multipack.html - 2025-08-05T20:29:19.436Z + 2025-08-06T04:13:21.028Z https://docs.axolotl.ai/examples/colab-notebooks/colab-axolotl-example.html - 2025-08-05T20:29:19.441Z + 2025-08-06T04:13:21.034Z https://docs.axolotl.ai/FAQS.html - 2025-08-05T20:29:19.431Z + 2025-08-06T04:13:21.021Z