From fcea4633b675c7c63877e46562a604bc99a37bb4 Mon Sep 17 00:00:00 2001 From: Quarto GHA Workflow Runner Date: Tue, 18 Feb 2025 03:00:28 +0000 Subject: [PATCH] Built site for gh-pages --- .nojekyll | 2 +- docs/config.html | 833 +++++++++++++------------ docs/dataset-formats/conversation.html | 39 +- docs/dataset-formats/index.html | 46 +- docs/faq.html | 4 + docs/rlhf.html | 13 +- search.json | 10 +- sitemap.xml | 78 +-- 8 files changed, 522 insertions(+), 503 deletions(-) diff --git a/.nojekyll b/.nojekyll index e0566d454..cd2ded2ab 100644 --- a/.nojekyll +++ b/.nojekyll @@ -1 +1 @@ -377f72f4 \ No newline at end of file +e72ca856 \ No newline at end of file diff --git a/docs/config.html b/docs/config.html index 8f6d473c3..8df7c7cf2 100644 --- a/docs/config.html +++ b/docs/config.html @@ -492,425 +492,434 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin # Key containing the messages (default: "messages") field_messages: messages - # Key for role in each message (default: "role") - message_field_role: role - # Key for content in each message (default: "content") - message_field_content: content - - # Optional[Dict[str, List]]. Roles mapping in the messages. The default is: - roles: - user: ["human", "user"] - assistant: ["gpt", "assistant"] - system: ["system"] - tool: ["tool"] + + # Mapping of properties from the input dataset to the chat template. + # (default: message_property_mappings={'role':'role', 'content':'content'}) + # If a property exists in the template but not in this mapping, the system will attempt + # to load it directly from the message using the property name as the key. + # Example: In the mapping below, 'from' is loaded from input dataset and used as 'role', + # while 'value' is loaded and used as 'content' in the chat template. + message_property_mappings: + role: from + content: value + # ... - # IMPORTANT: The following fields determine which parts of the conversation to train on. 
- # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train - # See examples at `docs/dataset-formats/conversation.qmd` - # Note: If the below 4 fields are empty, defaults to training only on the last message. - - # Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss. - roles_to_train: ["assistant"] # default - # Optional[str]. Which EOS tokens to train on in the conversation. Possible values are: - # - all: train on all EOS tokens - # - turn (default): train on the EOS token at the end of each trainable turn - # - last: train on the last EOS token in the conversation - train_on_eos: last - # The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`. - message_field_training: training - # The key in the message turn that contains the training details. Useful to selectively train on certain tokens in a turn. - # The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train). - message_field_training_detail: train_detail - - -# If false, the datasets will not be shuffled and will keep their original order in `datasets`. -# The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true. -shuffle_merged_datasets: true - -Deduplicates datasets and test_datasets with identical entries. -dataset_exact_deduplication: true - -# A list of one or more datasets to eval the model with. -# You can use either test_datasets, or val_set_size, but not both. -test_datasets: - - path: /workspace/data/eval.jsonl - ds_type: json - # You need to specify a split. For "json" datasets the default split is called "train". 
- split: train - type: completion - data_files: - - /workspace/data/eval.jsonl - -# use RL training: 'dpo', 'ipo', 'kto' -rl: -# whether to perform weighting if doing DPO training. Boolean. -dpo_use_weighting: - -# reward modelling: `True` or `False` -reward_model: - -# process reward modelling: `True` or `False` -process_reward_model: - -# The name of the chat template to use for training, following values are supported: -# - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default value. -# - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py -# - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not available in the tokenizer. -# - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field. -# The selected chat template will be saved to the tokenizer_config.json for easier inferencing -# Note: It is recommended to set train_on_inputs to true when using a chat template that is different from the model's default chat template. -chat_template: tokenizer_default -# custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null. -chat_template_jinja: null -# Changes the default system message -default_system_message: You are a helpful assistant. Please give a long and detailed answer. # Currently only supports chatml. 
-# Axolotl attempts to save the dataset as an arrow after packing the data together so -# subsequent training attempts load faster, relative path -dataset_prepared_path: data/last_run_prepared -# Push prepared dataset to hub -push_dataset_to_hub: # repo path -# The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()` -# if not set. -dataset_processes: # defaults to os.cpu_count() if not set -# Keep dataset in memory while preprocessing -# Only needed if cached dataset is taking too much storage -dataset_keep_in_memory: -# push checkpoints to hub -hub_model_id: # private repo path to push finetuned model -# how to push checkpoints to hub -# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy -hub_strategy: -# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets -# Required to be true when used in combination with `push_dataset_to_hub` -hf_use_auth_token: # boolean -# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval. -val_set_size: 0.04 -# Num shards for whole dataset -dataset_shard_num: -# Index of shard to use for whole dataset -dataset_shard_idx: - -# The maximum length of an input to train with, this should typically be less than 2048 -# as most models have a token/context limit of 2048 -sequence_len: 2048 -# Pad inputs so each step uses constant sized buffers -# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently -pad_to_sequence_len: -# Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true' -sample_packing: -# Set to 'false' if getting errors during eval with sample_packing on. -eval_sample_packing: -# You can set these packing optimizations AFTER starting a training at least once. -# The trainer will provide recommended values for these values. 
-sample_packing_eff_est: -total_num_tokens: -# Increasing the following values helps with packing, but usually only slightly (<%1.) -# The number of samples packed at a time. -sample_packing_group_size: 100000 -# The number of samples which can be packed into one sequence. Increase if using a large sequence_len with many short samples. -sample_packing_bin_size: 200 -# whether to concatenate samples during pretraining -pretraining_sample_concatenation: - -# Use batch flattening for speedups when not using sample_packing -batch_flattening: - -# Passed through to transformers when loading the model when launched without accelerate -# Use `sequential` when training w/ model parallelism to limit memory -device_map: -# Defines the max memory usage per gpu on the system. Passed through to transformers when loading the model. -max_memory: + message_property_mappings: + + # Optional[Dict[str, List]]. Roles mapping in the messages. The default is: + roles: + user: ["human", "user"] + assistant: ["gpt", "assistant"] + system: ["system"] + tool: ["tool"] + + # IMPORTANT: The following fields determine which parts of the conversation to train on. + # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train + # See examples at `docs/dataset-formats/conversation.qmd` + # Note: If the below 4 fields are empty, defaults to training only on the last message. + + # Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss. + roles_to_train: ["assistant"] # default + # Optional[str]. Which EOS tokens to train on in the conversation. Possible values are: + # - all: train on all EOS tokens + # - turn (default): train on the EOS token at the end of each trainable turn + # - last: train on the last EOS token in the conversation + train_on_eos: last + # The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. 
Useful to selectively train on certain turns besides the `roles_to_train`. + message_field_training: training + # The key in the message turn that contains the training details. Useful to selectively train on certain tokens in a turn. + # The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train). + message_field_training_detail: train_detail + + +# If false, the datasets will not be shuffled and will keep their original order in `datasets`. +# The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true. +shuffle_merged_datasets: true + +Deduplicates datasets and test_datasets with identical entries. +dataset_exact_deduplication: true + +# A list of one or more datasets to eval the model with. +# You can use either test_datasets, or val_set_size, but not both. +test_datasets: + - path: /workspace/data/eval.jsonl + ds_type: json + # You need to specify a split. For "json" datasets the default split is called "train". + split: train + type: completion + data_files: + - /workspace/data/eval.jsonl + +# use RL training: 'dpo', 'ipo', 'kto' +rl: +# whether to perform weighting if doing DPO training. Boolean. +dpo_use_weighting: + +# reward modelling: `True` or `False` +reward_model: + +# process reward modelling: `True` or `False` +process_reward_model: + +# The name of the chat template to use for training, following values are supported: +# - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default value. +# - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py +# - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. 
tokenizer_default_fallback_chatml. This is useful when the chat template is not available in the tokenizer. +# - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field. +# The selected chat template will be saved to the tokenizer_config.json for easier inferencing +# Note: It is recommended to set train_on_inputs to true when using a chat template that is different from the model's default chat template. +chat_template: tokenizer_default +# custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null. +chat_template_jinja: null +# Changes the default system message +default_system_message: You are a helpful assistant. Please give a long and detailed answer. # Currently only supports chatml. +# Axolotl attempts to save the dataset as an arrow after packing the data together so +# subsequent training attempts load faster, relative path +dataset_prepared_path: data/last_run_prepared +# Push prepared dataset to hub +push_dataset_to_hub: # repo path +# The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()` +# if not set. +dataset_processes: # defaults to os.cpu_count() if not set +# Keep dataset in memory while preprocessing +# Only needed if cached dataset is taking too much storage +dataset_keep_in_memory: +# push checkpoints to hub +hub_model_id: # private repo path to push finetuned model +# how to push checkpoints to hub +# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy +hub_strategy: +# Whether to use hf `use_auth_token` for loading datasets. 
Useful for fetching private datasets +# Required to be true when used in combination with `push_dataset_to_hub` +hf_use_auth_token: # boolean +# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval. +val_set_size: 0.04 +# Num shards for whole dataset +dataset_shard_num: +# Index of shard to use for whole dataset +dataset_shard_idx: + +# The maximum length of an input to train with, this should typically be less than 2048 +# as most models have a token/context limit of 2048 +sequence_len: 2048 +# Pad inputs so each step uses constant sized buffers +# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently +pad_to_sequence_len: +# Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true' +sample_packing: +# Set to 'false' if getting errors during eval with sample_packing on. +eval_sample_packing: +# You can set these packing optimizations AFTER starting a training at least once. +# The trainer will provide recommended values for these values. +sample_packing_eff_est: +total_num_tokens: +# Increasing the following values helps with packing, but usually only slightly (<%1.) +# The number of samples packed at a time. +sample_packing_group_size: 100000 +# The number of samples which can be packed into one sequence. Increase if using a large sequence_len with many short samples. +sample_packing_bin_size: 200 +# whether to concatenate samples during pretraining +pretraining_sample_concatenation: -# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model -adapter: lora -# If you already have a lora model trained that you want to load, put that here. -# This means after training, if you want to test the model, you should set this to the value of `output_dir`. -# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`. 
-lora_model_dir: - -# LoRA hyperparameters -# For more details about the following options, see: -# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2 -lora_r: 8 -lora_alpha: 16 -lora_dropout: 0.05 -lora_target_modules: - - q_proj - - v_proj -# - k_proj -# - o_proj -# - gate_proj -# - down_proj -# - up_proj -lora_target_linear: # If true, will target all linear modules -peft_layers_to_transform: # The layer indices to transform, otherwise, apply to all layers - -# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens. -# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models. -# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities. -# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994 -lora_modules_to_save: -# - embed_tokens -# - lm_head - -lora_fan_in_fan_out: false - -# Apply custom LoRA autograd functions and activation function Triton kernels for -# speed and memory savings -# See: https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html -lora_mlp_kernel: true -lora_qkv_kernel: true -lora_o_kernel: true +# Use batch flattening for speedups when not using sample_packing +batch_flattening: + +# Passed through to transformers when loading the model when launched without accelerate +# Use `sequential` when training w/ model parallelism to limit memory +device_map: +# Defines the max memory usage per gpu on the system. Passed through to transformers when loading the model. +max_memory: + +# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model +adapter: lora +# If you already have a lora model trained that you want to load, put that here. +# This means after training, if you want to test the model, you should set this to the value of `output_dir`. 
+# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`. +lora_model_dir: + +# LoRA hyperparameters +# For more details about the following options, see: +# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2 +lora_r: 8 +lora_alpha: 16 +lora_dropout: 0.05 +lora_target_modules: + - q_proj + - v_proj +# - k_proj +# - o_proj +# - gate_proj +# - down_proj +# - up_proj +lora_target_linear: # If true, will target all linear modules +peft_layers_to_transform: # The layer indices to transform, otherwise, apply to all layers + +# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens. +# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models. +# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities. +# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994 +lora_modules_to_save: +# - embed_tokens +# - lm_head -# LoRA+ hyperparameters -# For more details about the following options, see: -# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/train_builder.py` -loraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4. -loraplus_lr_embedding: # loraplus learning rate for lora embedding layers. Default value is 1e-6. 
- -peft: - # Configuration options for loftq initialization for LoRA - # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization - loftq_config: - loftq_bits: # typically 4 bits - -# ReLoRA configuration -# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed -relora_steps: # Number of steps per ReLoRA restart -relora_warmup_steps: # Number of per-restart warmup steps -relora_anneal_steps: # Number of anneal steps for each relora cycle -relora_prune_ratio: # threshold for optimizer magnitude when pruning -relora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings - -# wandb configuration if you're using it -# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`. -wandb_mode: # "offline" to save run metadata locally and not sync to the server, "disabled" to turn off wandb -wandb_project: # Your wandb project name -wandb_entity: # A wandb Team name if using a Team -wandb_watch: -wandb_name: # Set the name of your wandb run -wandb_run_id: # Set the ID of your wandb run -wandb_log_model: # "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only at the end of training - -# mlflow configuration if you're using it -mlflow_tracking_uri: # URI to mlflow -mlflow_experiment_name: # Your experiment name -mlflow_run_name: # Your run name -hf_mlflow_log_artifacts: # set to true to copy each saved checkpoint on each save to mlflow artifact registry - -# Comet configuration if you're using it -# Make sure your `COMET_API_KEY` environment variable is set (recommended) or you login to Comet with `comet login`. -# Check out our documentation for more details https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start -use_comet: # Enable or disable Comet integration. -comet_api_key: # API key for Comet. Recommended to set via `comet login`. 
-comet_workspace: # Workspace name in Comet. Defaults to the user's default workspace. -comet_project_name: # Project name in Comet. Defaults to Uncategorized. -comet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key. -comet_mode: # Create a new experiment ("create") or log to an existing one ("get"). Default ("get_or_create") auto-selects based on configuration. -comet_online: # Set to True to log data to Comet server, or False for offline storage. Default is True. -comet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details. - -# Tensorboard -use_tensorboard: # Optional[bool] - -# Where to save the full-finetuned model to -output_dir: ./completed-model - -# Whether to use torch.compile and which backend to use -# setting to `auto` will enable torch compile when torch>=2.5.1 -torch_compile: # Optional[Union[Literal["auto"], bool]] -torch_compile_backend: # Optional[str] - -# Training hyperparameters - -# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps. -gradient_accumulation_steps: 1 -# The number of samples to include in each batch. This is the number of samples sent to each GPU. -# Batch size per gpu = micro_batch_size * gradient_accumulation_steps -micro_batch_size: 2 -eval_batch_size: -num_epochs: 4 -warmup_steps: 100 # cannot use with warmup_ratio -warmup_ratio: 0.05 # cannot use with warmup_steps -learning_rate: 0.00003 -lr_quadratic_warmup: -logging_steps: -eval_steps: # Leave empty to eval at each epoch, integer for every N steps. float for fraction of total steps -evals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps -eval_strategy: # Set to `"no"` to skip evaluation, `"epoch"` at end of each epoch, leave empty to infer from `eval_steps`. 
-save_strategy: # Set to `"no"` to skip checkpoint saves, `"epoch"` at end of each epoch, `"best"` when better result is achieved, leave empty to infer from `save_steps`. -save_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps -saves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps -save_total_limit: # Checkpoints saved at a time -# Maximum number of iterations to train for. It precedes num_epochs which means that -# if both are set, num_epochs will not be guaranteed. -# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps -max_steps: - -# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time. -include_tokens_per_second: - -eval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0 -eval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128 -eval_causal_lm_metrics: # HF evaluate metrics used during evaluation. Default is ["sacrebleu", "comet", "ter", "chrf", "perplexity"] - -profiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir. - # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information - # snapshots can be visualized @ https://pytorch.org/memory_viz - -loss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training) -loss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3) - -# Save model as safetensors (require safetensors package) -save_safetensors: - -# Whether to mask out or include the human's prompt from the training labels -train_on_inputs: false -# Group similarly sized data to minimize padding. 
-# May be slower to start, as it must download and sort the entire dataset. -# Note that training loss may have an oscillating pattern with this enabled. -group_by_length: false - -# Whether to use gradient checkpointing https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing -gradient_checkpointing: false -# additional kwargs to pass to the trainer for gradient checkpointing -# gradient_checkpointing_kwargs: -# use_reentrant: true - -# Stop training after this many evaluation losses have increased in a row -# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback -early_stopping_patience: 3 - -# Specify a scheduler and kwargs to use with the optimizer -lr_scheduler: # 'one_cycle' | 'log_sweep' | empty for cosine -lr_scheduler_kwargs: -cosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr -cosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf) - -# For one_cycle optim -lr_div_factor: # Learning rate div factor +lora_fan_in_fan_out: false + +# Apply custom LoRA autograd functions and activation function Triton kernels for +# speed and memory savings +# See: https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html +lora_mlp_kernel: true +lora_qkv_kernel: true +lora_o_kernel: true + +# LoRA+ hyperparameters +# For more details about the following options, see: +# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/train_builder.py` +loraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4. +loraplus_lr_embedding: # loraplus learning rate for lora embedding layers. Default value is 1e-6. 
+ +peft: + # Configuration options for loftq initialization for LoRA + # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization + loftq_config: + loftq_bits: # typically 4 bits + +# ReLoRA configuration +# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed +relora_steps: # Number of steps per ReLoRA restart +relora_warmup_steps: # Number of per-restart warmup steps +relora_anneal_steps: # Number of anneal steps for each relora cycle +relora_prune_ratio: # threshold for optimizer magnitude when pruning +relora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings + +# wandb configuration if you're using it +# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`. +wandb_mode: # "offline" to save run metadata locally and not sync to the server, "disabled" to turn off wandb +wandb_project: # Your wandb project name +wandb_entity: # A wandb Team name if using a Team +wandb_watch: +wandb_name: # Set the name of your wandb run +wandb_run_id: # Set the ID of your wandb run +wandb_log_model: # "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only at the end of training + +# mlflow configuration if you're using it +mlflow_tracking_uri: # URI to mlflow +mlflow_experiment_name: # Your experiment name +mlflow_run_name: # Your run name +hf_mlflow_log_artifacts: # set to true to copy each saved checkpoint on each save to mlflow artifact registry + +# Comet configuration if you're using it +# Make sure your `COMET_API_KEY` environment variable is set (recommended) or you login to Comet with `comet login`. +# Check out our documentation for more details https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start +use_comet: # Enable or disable Comet integration. +comet_api_key: # API key for Comet. Recommended to set via `comet login`. 
+comet_workspace: # Workspace name in Comet. Defaults to the user's default workspace. +comet_project_name: # Project name in Comet. Defaults to Uncategorized. +comet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key. +comet_mode: # Create a new experiment ("create") or log to an existing one ("get"). Default ("get_or_create") auto-selects based on configuration. +comet_online: # Set to True to log data to Comet server, or False for offline storage. Default is True. +comet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details. + +# Tensorboard +use_tensorboard: # Optional[bool] + +# Where to save the full-finetuned model to +output_dir: ./completed-model + +# Whether to use torch.compile and which backend to use +# setting to `auto` will enable torch compile when torch>=2.5.1 +torch_compile: # Optional[Union[Literal["auto"], bool]] +torch_compile_backend: # Optional[str] + +# Training hyperparameters + +# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps. +gradient_accumulation_steps: 1 +# The number of samples to include in each batch. This is the number of samples sent to each GPU. +# Batch size per gpu = micro_batch_size * gradient_accumulation_steps +micro_batch_size: 2 +eval_batch_size: +num_epochs: 4 +warmup_steps: 100 # cannot use with warmup_ratio +warmup_ratio: 0.05 # cannot use with warmup_steps +learning_rate: 0.00003 +lr_quadratic_warmup: +logging_steps: +eval_steps: # Leave empty to eval at each epoch, integer for every N steps. float for fraction of total steps +evals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps +eval_strategy: # Set to `"no"` to skip evaluation, `"epoch"` at end of each epoch, leave empty to infer from `eval_steps`. 
+save_strategy: # Set to `"no"` to skip checkpoint saves, `"epoch"` at end of each epoch, `"best"` when better result is achieved, leave empty to infer from `save_steps`. +save_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps +saves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps +save_total_limit: # Checkpoints saved at a time +# Maximum number of iterations to train for. It precedes num_epochs which means that +# if both are set, num_epochs will not be guaranteed. +# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps +max_steps: + +# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time. +include_tokens_per_second: + +eval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0 +eval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128 +eval_causal_lm_metrics: # HF evaluate metrics used during evaluation. Default is ["sacrebleu", "comet", "ter", "chrf", "perplexity"] + +profiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir. + # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information + # snapshots can be visualized @ https://pytorch.org/memory_viz + +loss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training) +loss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3) + +# Save model as safetensors (require safetensors package) +save_safetensors: + +# Whether to mask out or include the human's prompt from the training labels +train_on_inputs: false +# Group similarly sized data to minimize padding. 
+# May be slower to start, as it must download and sort the entire dataset. +# Note that training loss may have an oscillating pattern with this enabled. +group_by_length: false + +# Whether to use gradient checkpointing https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing +gradient_checkpointing: false +# additional kwargs to pass to the trainer for gradient checkpointing +# gradient_checkpointing_kwargs: +# use_reentrant: true + +# Stop training after this many evaluation losses have increased in a row +# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback +early_stopping_patience: 3 -# Specify optimizer -# Valid values are driven by the Transformers OptimizerNames class, see: -# https://github.com/huggingface/transformers/blob/95b374952dc27d8511541d6f5a4e22c9ec11fb24/src/transformers/training_args.py#L134 -# -# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of -# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. When in doubt, it is recommended to start with the optimizer used -# in the examples/ for your model and fine-tuning use case. 
-# -# Valid values for 'optimizer' include: -# - adamw_hf -# - adamw_torch -# - adamw_torch_fused -# - adamw_torch_xla -# - adamw_apex_fused -# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1) -# - adafactor -# - adamw_anyprecision -# - sgd -# - adagrad -# - adamw_bnb_8bit -# - lion_8bit -# - lion_32bit -# - paged_adamw_32bit -# - paged_adamw_8bit -# - paged_lion_32bit -# - paged_lion_8bit -# - galore_adamw -# - galore_adamw_8bit -# - galore_adafactor -# - galore_adamw_layerwise -# - galore_adamw_8bit_layerwise -# - galore_adafactor_layerwise -optimizer: -# Dictionary of arguments to pass to the optimizer -optim_args: -# For Galore Optimizers the following optim_args are available -# rank: # type: int -# update_proj_gap # type: int -# scale # type: float -# proj_type: # type: str, default = std - -# The target modules to optimize, i.e. the module names that you would like to train, right now this is used only for GaLore algorithm -optim_target_modules: -# - self_attn # for llama -# - mlp - -# Specify weight decay -weight_decay: -# adamw hyperparams -adam_beta1: -adam_beta2: -adam_epsilon: -# Gradient clipping max norm -max_grad_norm: +# Specify a scheduler and kwargs to use with the optimizer +lr_scheduler: # 'one_cycle' | 'log_sweep' | empty for cosine +lr_scheduler_kwargs: +cosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr +cosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. 
cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf) + +# For one_cycle optim +lr_div_factor: # Learning rate div factor + +# Specify optimizer +# Valid values are driven by the Transformers OptimizerNames class, see: +# https://github.com/huggingface/transformers/blob/95b374952dc27d8511541d6f5a4e22c9ec11fb24/src/transformers/training_args.py#L134 +# +# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of +# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. When in doubt, it is recommended to start with the optimizer used +# in the examples/ for your model and fine-tuning use case. +# +# Valid values for 'optimizer' include: +# - adamw_hf +# - adamw_torch +# - adamw_torch_fused +# - adamw_torch_xla +# - adamw_apex_fused +# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1) +# - adafactor +# - adamw_anyprecision +# - sgd +# - adagrad +# - adamw_bnb_8bit +# - lion_8bit +# - lion_32bit +# - paged_adamw_32bit +# - paged_adamw_8bit +# - paged_lion_32bit +# - paged_lion_8bit +# - galore_adamw +# - galore_adamw_8bit +# - galore_adafactor +# - galore_adamw_layerwise +# - galore_adamw_8bit_layerwise +# - galore_adafactor_layerwise +optimizer: +# Dictionary of arguments to pass to the optimizer +optim_args: +# For Galore Optimizers the following optim_args are available +# rank: # type: int +# update_proj_gap # type: int +# scale # type: float +# proj_type: # type: str, default = std + +# The target modules to optimize, i.e. 
the module names that you would like to train, right now this is used only for GaLore algorithm +optim_target_modules: +# - self_attn # for llama +# - mlp -# Augmentation techniques -# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings -# currently only supported on Llama and Mistral -neftune_noise_alpha: - -# Whether to bettertransformers -flash_optimum: -# Whether to use xformers attention patch https://github.com/facebookresearch/xformers: -xformers_attention: -# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention: -flash_attention: -flash_attn_cross_entropy: # Whether to use flash-attention cross entropy implementation - advanced use only -flash_attn_rms_norm: # Whether to use flash-attention rms norm implementation - advanced use only -flash_attn_fuse_qkv: # Whether to fuse QKV into a single operation -flash_attn_fuse_mlp: # Whether to fuse part of the MLP into a single operation -# Whether to use scaled-dot-product attention -# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html -sdp_attention: -# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf -s2_attention: -# Resume from a specific checkpoint dir -resume_from_checkpoint: -# If resume_from_checkpoint isn't set and you simply want it to start where it left off. -# Be careful with this being turned on between different models. -auto_resume_from_checkpoints: false - -# Don't mess with this, it's here for accelerate and torchrun -local_rank: - -# Add or change special tokens. -# If you add tokens here, you don't need to add them to the `tokens` list. -special_tokens: - # bos_token: "<s>" - # eos_token: "</s>" - # unk_token: "<unk>" - # pad_token: "[PAD]" - -# Add extra tokens. -tokens: - -# FSDP -fsdp: -fsdp_config: - -# Deepspeed config path. 
e.g., deepspeed_configs/zero3.json -deepspeed: - -# Advanced DDP Arguments -ddp_timeout: -ddp_bucket_cap_mb: -ddp_broadcast_buffers: - -# Path to torch distx for optim 'adamw_anyprecision' -torchdistx_path: - -# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize -pretraining_dataset: - -# Debug mode -debug: +# Specify weight decay +weight_decay: +# adamw hyperparams +adam_beta1: +adam_beta2: +adam_epsilon: +# Gradient clipping max norm +max_grad_norm: + +# Augmentation techniques +# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings +# currently only supported on Llama and Mistral +neftune_noise_alpha: + +# Whether to bettertransformers +flash_optimum: +# Whether to use xformers attention patch https://github.com/facebookresearch/xformers: +xformers_attention: +# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention: +flash_attention: +flash_attn_cross_entropy: # Whether to use flash-attention cross entropy implementation - advanced use only +flash_attn_rms_norm: # Whether to use flash-attention rms norm implementation - advanced use only +flash_attn_fuse_qkv: # Whether to fuse QKV into a single operation +flash_attn_fuse_mlp: # Whether to fuse part of the MLP into a single operation +# Whether to use scaled-dot-product attention +# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html +sdp_attention: +# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf +s2_attention: +# Resume from a specific checkpoint dir +resume_from_checkpoint: +# If resume_from_checkpoint isn't set and you simply want it to start where it left off. +# Be careful with this being turned on between different models. +auto_resume_from_checkpoints: false + +# Don't mess with this, it's here for accelerate and torchrun +local_rank: + +# Add or change special tokens. 
+# If you add tokens here, you don't need to add them to the `tokens` list. +special_tokens: + # bos_token: "<s>" + # eos_token: "</s>" + # unk_token: "<unk>" + # pad_token: "[PAD]" + +# Add extra tokens. +tokens: + +# FSDP +fsdp: +fsdp_config: + +# Deepspeed config path. e.g., deepspeed_configs/zero3.json +deepspeed: + +# Advanced DDP Arguments +ddp_timeout: +ddp_bucket_cap_mb: +ddp_broadcast_buffers: -# Seed -seed: +# Path to torch distx for optim 'adamw_anyprecision' +torchdistx_path: -# Allow overwrite yml config using from cli -strict: +# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize +pretraining_dataset: + +# Debug mode +debug: + +# Seed +seed: + +# Allow overwrite yml config using from cli +strict: diff --git a/docs/dataset-formats/conversation.html b/docs/dataset-formats/conversation.html index 769cdcf85..4617ca471 100644 --- a/docs/dataset-formats/conversation.html +++ b/docs/dataset-formats/conversation.html @@ -405,18 +405,20 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin type: chat_template field_messages: conversations - message_field_role: from - message_field_content: value - -# new (if setting a new chat_template like chatml, gemma, etc) -chat_template: chatml -datasets: - - path: ... - type: chat_template - - field_messages: conversations - message_field_role: from - message_field_content: value + message_property_mappings: + role: from + content: value + +# new (if setting a new chat_template like chatml, gemma, etc) +chat_template: chatml +datasets: + - path: ... + type: chat_template + + field_messages: conversations + message_property_mappings: + role: from + content: value

We recommend checking the examples below for other use cases.

@@ -491,12 +493,13 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin type: chat_template chat_template: tokenizer_default field_messages: conversations - message_field_role: from - message_field_content: value - roles_to_train: [] - train_on_eos: turn - message_field_training: train - message_field_training_detail: train_detail + message_property_mappings: + role: from + content: value + roles_to_train: [] + train_on_eos: turn + message_field_training: train + message_field_training_detail: train_detail

Tip: It is not necessary to use both message_field_training and message_field_training_detail at the same time.

diff --git a/docs/dataset-formats/index.html b/docs/dataset-formats/index.html index b622b019c..6a477c943 100644 --- a/docs/dataset-formats/index.html +++ b/docs/dataset-formats/index.html @@ -476,7 +476,7 @@ Important
  • Is your dataset in a “conversation” format, containing a list[messages]? If yes, check Conversation Dataset

  • Is your dataset in an “instruct” format, containing { instruction, response }? If yes, check Instruction Dataset

  • -

    If you went through the flow chart and did not find one that matches, it is recommended to preprocess your dataset into one of the above or create a Github Discussion.

    +

    If you went through the flow chart and did not find one that matches, it is recommended to preprocess your dataset into one of the above or start a thread on GitHub Discussions.

    @@ -638,9 +638,10 @@ The answer is 8.<|im_end|>

    If your dataset format is different, here are the keys you should check (with their defaults):

    datasets:
         ...
    -    field_messages: messages
    -    message_field_role: role
    -    message_field_content: content
    + field_messages: messages # this should point to the key containing the list of conversations + message_property_mappings: # this is a mapping from keys in your dataset to keys in chat_template + role: role + content: content

    In some chat_templates (e.g. Gemma), the roles are hardcoded to user and assistant. Consequently, you may find it necessary to map the roles in your dataset to these above. We currently have some defaults that should work for common datasets, but if you get a KeyError, it would be necessary to add mapping for your roles. Here is an example of how it would look like:

    datasets:
         ...
    @@ -686,24 +687,25 @@ The answer is 8.<|im_end|>
    # step 2 field_messages: messages - message_field_role: role - message_field_content: content - - roles: - assistant: - - gpt - - model - - assistant - user: - - human - - user - - # step 3 - roles_to_train: ["assistant"] - train_on_eos: "turn" - -special_tokens: - eos_token: <|im_end|>
    + message_property_mappings: + role: role + content: content + + roles: + assistant: + - gpt + - model + - assistant + user: + - human + - user + + # step 3 + roles_to_train: ["assistant"] + train_on_eos: "turn" + +special_tokens: + eos_token: <|im_end|>

    If this config were to be applied to the sample dataset above, the output would look as such (which can be retrieved via axolotl preprocess config.yaml --debug):

    <|im_start|>(-100, 128256) user(-100, 882)
     (-100, 198) Hi(-100, 13347) <|im_end|>(-100, 128257)
    diff --git a/docs/faq.html b/docs/faq.html
    index 28a8e857f..119aaa22a 100644
    --- a/docs/faq.html
    +++ b/docs/faq.html
    @@ -340,6 +340,10 @@ ul.task-list li input[type="checkbox"] {
     

    A: This is usually an issue with the GPU. This can be resolved through setting the os environment variable CUDA_VISIBLE_DEVICES=0. If you are on runpod, this is usually a pod issue. Starting a new pod should take care of it.

    +

    Q: jinja2.exceptions.UndefinedError: 'dict object' has no attribute 'content' / 'role' / ____

    +
    +

    A: This means that the property mapping for the stated attribute does not exist when building the chat_template prompt. For example, if there is no attribute 'content', please check that you have added the correct mapping for content under message_property_mappings.

    +
    diff --git a/docs/rlhf.html b/docs/rlhf.html index 0b1f1916c..bc26735d3 100644 --- a/docs/rlhf.html +++ b/docs/rlhf.html @@ -609,12 +609,13 @@ Tip field_messages: "messages" field_chosen: "chosen" field_rejected: "rejected" - message_field_role: "role" - message_field_content: "content" - roles: - user: ["user"] - assistant: ["assistant"] - system: ["system"]
    + message_property_mappings: + role: role + content: content + roles: + user: ["user"] + assistant: ["assistant"] + system: ["system"]

    Sample input format:

    {
         "messages": [
    diff --git a/search.json b/search.json
    index abfd33714..8b87d9df0 100644
    --- a/search.json
    +++ b/search.json
    @@ -105,7 +105,7 @@
         "href": "docs/rlhf.html#dpo",
         "title": "RLHF (Beta)",
         "section": "DPO",
    -    "text": "DPO\nExample config:\nrl: dpo\ndatasets:\n  - path: Intel/orca_dpo_pairs\n    split: train\n    type: chatml.intel\n  - path: argilla/ultrafeedback-binarized-preferences\n    split: train\n    type: chatml\nDPO supports the following types with the following dataset format:\n\nchatml.argilla\n{\n    \"system\": \"...\", // optional\n    \"instruction\": \"...\",\n    \"chosen_response\": \"...\",\n    \"rejected_response\": \"...\"\n}\n\n\nchatml.argilla_chat\n{\n    \"chosen\": [\n        {\"role\": \"user\", \"content\": \"...\"},\n        {\"role\": \"assistant\", \"content\": \"...\"}\n    ],\n    \"rejected\": [\n        {\"role\": \"user\", \"content\": \"...\"},\n        {\"role\": \"assistant\", \"content\": \"...\"}\n    ]\n}\n\n\nchatml.icr\n{\n    \"system\": \"...\", // optional\n    \"input\": \"...\",\n    \"chosen\": \"...\",\n    \"rejected\": \"...\"\n}\n\n\nchatml.intel\n{\n    \"system\": \"...\", // optional\n    \"question\": \"...\",\n    \"chosen\": \"...\",\n    \"rejected\": \"...\"\n}\n\n\nchatml.prompt_pairs\n{\n    \"system\": \"...\", // optional\n    \"prompt\": \"...\",\n    \"chosen\": \"...\",\n    \"rejected\": \"...\"\n}\n\n\nchatml.ultra\n{\n    \"system\": \"...\", // optional\n    \"prompt\": \"...\",\n    \"chosen\": [\n        {\"role\": \"user\", \"content\": \"...\"},\n        {\"role\": \"assistant\", \"content\": \"...\"}\n    ],\n    \"rejected\": [\n        {\"role\": \"user\", \"content\": \"...\"},\n        {\"role\": \"assistant\", \"content\": \"...\"}\n    ]\n}\n\n\nllama3.argilla\n{\n    \"system\": \"...\", // optional\n    \"instruction\": \"...\",\n    \"chosen_response\": \"...\",\n    \"rejected_response\": \"...\"\n}\n\n\nllama3.argilla_chat\n{\n    \"chosen\": [\n        {\"role\": \"user\", \"content\": \"...\"},\n        {\"role\": \"assistant\", \"content\": \"...\"}\n    ],\n    \"rejected\": [\n        {\"role\": \"user\", \"content\": \"...\"},\n        {\"role\": \"assistant\", 
\"content\": \"...\"}\n    ]\n}\n\n\nllama3.icr\n{\n    \"system\": \"...\", // optional\n    \"input\": \"...\",\n    \"chosen\": \"...\",\n    \"rejected\": \"...\"\n}\n\n\nllama3.intel\n{\n    \"system\": \"...\", // optional\n    \"question\": \"...\",\n    \"chosen\": \"...\",\n    \"rejected\": \"...\"\n}\n\n\nllama3.prompt_pairs\n{\n    \"system\": \"...\", // optional\n    \"prompt\": \"...\",\n    \"chosen\": \"...\",\n    \"rejected\": \"...\"\n}\n\n\nllama3.ultra\n{\n    \"system\": \"...\", // optional\n    \"prompt\": \"...\",\n    \"chosen\": [\n        {\"role\": \"user\", \"content\": \"...\"},\n        {\"role\": \"assistant\", \"content\": \"...\"}\n    ],\n    \"rejected\": [\n        {\"role\": \"user\", \"content\": \"...\"},\n        {\"role\": \"assistant\", \"content\": \"...\"}\n    ]\n}\n\n\nzephyr.nectar\n{\n    \"prompt\": \"...\",\n    \"answers\": [\n        {\n            \"answer\": \"...\",\n            \"rank\": 1\n        },\n        {\n            \"answer\": \"...\",\n            \"rank\": 2\n        }\n        // ... more answers with ranks\n    ]\n}\n\n\nchat_template.default\nrl: dpo\ndatasets:\n  - path: ...\n    split: train\n    type: chat_template.default\n    field_messages: \"messages\"\n    field_chosen: \"chosen\"\n    field_rejected: \"rejected\"\n    message_field_role: \"role\"\n    message_field_content: \"content\"\n    roles:\n      user: [\"user\"]\n      assistant: [\"assistant\"]\n      system: [\"system\"]\nSample input format:\n{\n    \"messages\": [\n        {\n            \"role\": \"system\",\n            \"content\": \"...\"\n        },\n        {\n            \"role\": \"user\",\n            \"content\": \"...\"\n        },\n        // ... 
more messages\n    ],\n    \"chosen\": {\n        \"role\": \"assistant\",\n        \"content\": \"...\"\n    },\n    \"rejected\": {\n        \"role\": \"assistant\",\n        \"content\": \"...\"\n    }\n}\n\n\nuser_defined.default\nFor custom behaviors,\nrl: dpo\ndatasets:\n  - path: ...\n    split: train\n    type: user_defined.default\n\n    field_prompt: \"prompt\"\n    field_system: \"system\"\n    field_chosen: \"chosen\"\n    field_rejected: \"rejected\"\n    prompt_format: \"{prompt}\"\n    chosen_format: \"{chosen}\"\n    rejected_format: \"{rejected}\"\nThe input format is a simple JSON input with customizable fields based on the above config.\n{\n    \"system\": \"...\",  // optional\n    \"prompt\": \"...\",\n    \"chosen\": \"...\",\n    \"rejected\": \"...\"\n}",
    +    "text": "DPO\nExample config:\nrl: dpo\ndatasets:\n  - path: Intel/orca_dpo_pairs\n    split: train\n    type: chatml.intel\n  - path: argilla/ultrafeedback-binarized-preferences\n    split: train\n    type: chatml\nDPO supports the following types with the following dataset format:\n\nchatml.argilla\n{\n    \"system\": \"...\", // optional\n    \"instruction\": \"...\",\n    \"chosen_response\": \"...\",\n    \"rejected_response\": \"...\"\n}\n\n\nchatml.argilla_chat\n{\n    \"chosen\": [\n        {\"role\": \"user\", \"content\": \"...\"},\n        {\"role\": \"assistant\", \"content\": \"...\"}\n    ],\n    \"rejected\": [\n        {\"role\": \"user\", \"content\": \"...\"},\n        {\"role\": \"assistant\", \"content\": \"...\"}\n    ]\n}\n\n\nchatml.icr\n{\n    \"system\": \"...\", // optional\n    \"input\": \"...\",\n    \"chosen\": \"...\",\n    \"rejected\": \"...\"\n}\n\n\nchatml.intel\n{\n    \"system\": \"...\", // optional\n    \"question\": \"...\",\n    \"chosen\": \"...\",\n    \"rejected\": \"...\"\n}\n\n\nchatml.prompt_pairs\n{\n    \"system\": \"...\", // optional\n    \"prompt\": \"...\",\n    \"chosen\": \"...\",\n    \"rejected\": \"...\"\n}\n\n\nchatml.ultra\n{\n    \"system\": \"...\", // optional\n    \"prompt\": \"...\",\n    \"chosen\": [\n        {\"role\": \"user\", \"content\": \"...\"},\n        {\"role\": \"assistant\", \"content\": \"...\"}\n    ],\n    \"rejected\": [\n        {\"role\": \"user\", \"content\": \"...\"},\n        {\"role\": \"assistant\", \"content\": \"...\"}\n    ]\n}\n\n\nllama3.argilla\n{\n    \"system\": \"...\", // optional\n    \"instruction\": \"...\",\n    \"chosen_response\": \"...\",\n    \"rejected_response\": \"...\"\n}\n\n\nllama3.argilla_chat\n{\n    \"chosen\": [\n        {\"role\": \"user\", \"content\": \"...\"},\n        {\"role\": \"assistant\", \"content\": \"...\"}\n    ],\n    \"rejected\": [\n        {\"role\": \"user\", \"content\": \"...\"},\n        {\"role\": \"assistant\", 
\"content\": \"...\"}\n    ]\n}\n\n\nllama3.icr\n{\n    \"system\": \"...\", // optional\n    \"input\": \"...\",\n    \"chosen\": \"...\",\n    \"rejected\": \"...\"\n}\n\n\nllama3.intel\n{\n    \"system\": \"...\", // optional\n    \"question\": \"...\",\n    \"chosen\": \"...\",\n    \"rejected\": \"...\"\n}\n\n\nllama3.prompt_pairs\n{\n    \"system\": \"...\", // optional\n    \"prompt\": \"...\",\n    \"chosen\": \"...\",\n    \"rejected\": \"...\"\n}\n\n\nllama3.ultra\n{\n    \"system\": \"...\", // optional\n    \"prompt\": \"...\",\n    \"chosen\": [\n        {\"role\": \"user\", \"content\": \"...\"},\n        {\"role\": \"assistant\", \"content\": \"...\"}\n    ],\n    \"rejected\": [\n        {\"role\": \"user\", \"content\": \"...\"},\n        {\"role\": \"assistant\", \"content\": \"...\"}\n    ]\n}\n\n\nzephyr.nectar\n{\n    \"prompt\": \"...\",\n    \"answers\": [\n        {\n            \"answer\": \"...\",\n            \"rank\": 1\n        },\n        {\n            \"answer\": \"...\",\n            \"rank\": 2\n        }\n        // ... more answers with ranks\n    ]\n}\n\n\nchat_template.default\nrl: dpo\ndatasets:\n  - path: ...\n    split: train\n    type: chat_template.default\n    field_messages: \"messages\"\n    field_chosen: \"chosen\"\n    field_rejected: \"rejected\"\n    message_property_mappings:\n      role: role\n      content: content\n    roles:\n      user: [\"user\"]\n      assistant: [\"assistant\"]\n      system: [\"system\"]\nSample input format:\n{\n    \"messages\": [\n        {\n            \"role\": \"system\",\n            \"content\": \"...\"\n        },\n        {\n            \"role\": \"user\",\n            \"content\": \"...\"\n        },\n        // ... 
more messages\n    ],\n    \"chosen\": {\n        \"role\": \"assistant\",\n        \"content\": \"...\"\n    },\n    \"rejected\": {\n        \"role\": \"assistant\",\n        \"content\": \"...\"\n    }\n}\n\n\nuser_defined.default\nFor custom behaviors,\nrl: dpo\ndatasets:\n  - path: ...\n    split: train\n    type: user_defined.default\n\n    field_prompt: \"prompt\"\n    field_system: \"system\"\n    field_chosen: \"chosen\"\n    field_rejected: \"rejected\"\n    prompt_format: \"{prompt}\"\n    chosen_format: \"{chosen}\"\n    rejected_format: \"{rejected}\"\nThe input format is a simple JSON input with customizable fields based on the above config.\n{\n    \"system\": \"...\",  // optional\n    \"prompt\": \"...\",\n    \"chosen\": \"...\",\n    \"rejected\": \"...\"\n}",
         "crumbs": [
           "How-To Guides",
           "RLHF (Beta)"
    @@ -193,7 +193,7 @@
         "href": "docs/faq.html",
         "title": "FAQ",
         "section": "",
    -    "text": "Q: The trainer stopped and hasn’t progressed in several minutes.\n\nA: Usually an issue with the GPUs communicating with each other. See the NCCL doc\n\nQ: Exitcode -9\n\nA: This usually happens when you run out of system RAM.\n\nQ: Exitcode -7 while using deepspeed\n\nA: Try upgrading deepspeed w: pip install -U deepspeed\n\nQ: AttributeError: ‘DummyOptim’ object has no attribute ‘step’\n\nA: You may be using deepspeed with single gpu. Please don’t set deepspeed: in yaml or cli.\n\nQ: The codes is stuck on saving preprocessed datasets.\n\nA: This is usually an issue with the GPU. This can be resolved through setting the os environment variable CUDA_VISIBLE_DEVICES=0. If you are on runpod, this is usually a pod issue. Starting a new pod should take care of it.",
    +    "text": "Q: The trainer stopped and hasn’t progressed in several minutes.\n\nA: Usually an issue with the GPUs communicating with each other. See the NCCL doc\n\nQ: Exitcode -9\n\nA: This usually happens when you run out of system RAM.\n\nQ: Exitcode -7 while using deepspeed\n\nA: Try upgrading deepspeed w: pip install -U deepspeed\n\nQ: AttributeError: ‘DummyOptim’ object has no attribute ‘step’\n\nA: You may be using deepspeed with single gpu. Please don’t set deepspeed: in yaml or cli.\n\nQ: The codes is stuck on saving preprocessed datasets.\n\nA: This is usually an issue with the GPU. This can be resolved through setting the os environment variable CUDA_VISIBLE_DEVICES=0. If you are on runpod, this is usually a pod issue. Starting a new pod should take care of it.\n\nQ: jinja2.exceptions.UndefinedError: 'dict object' has no attribute 'content' / 'role' / ____\n\nA: This means that the property mapping for the stated attribute does not exist when building chat_template prompt. For example, if no attribute 'content', please check you have added the correct mapping for content under message_property_mappings.",
         "crumbs": [
           "FAQ"
         ]
    @@ -340,7 +340,7 @@
         "href": "docs/dataset-formats/index.html#supervised-fine-tuning-sft",
         "title": "Dataset Formats",
         "section": "Supervised fine-tuning (SFT)",
    -    "text": "Supervised fine-tuning (SFT)\nSupervised fine-tuning is the process of training models to respond to an instruction or chat input.\nAs there are a wide variety of dataset formats, Axolotl tries to support a majority of the formats available in public datasets.\nAxolotl provides four approaches for loading datasets, however, it’s easier to work backwards from the dataset you have available to figure out which approach to use.\nA flow chart is as follows:\n\nDo you already have the dataset tokenized? If yes, check Pre-Tokenized Dataset.\nDo you want to format the dataset yourself and manually choose each section to mask? If yes, check Template Free Dataset\nIs your dataset in a “conversation” format, containing a list[messages]? If yes, check Conversation Dataset\nIs your dataset in an “instruct” format, containing { instruction, response }? If yes, check Instruction Dataset\n\nIf you went through the flow chart and did not find one that matches, it is recommended to preprocess your dataset into one of the above or create a Github Discussion.\n\n\n\n\n\n\nTip\n\n\n\nYou can mix and match within each approach or across approaches to train a model on a variety of datasets.\n\n\n\nPre-Tokenized Dataset\nWe suggest this approach when you want to bring your own tokenized dataset.\nAxolotl expects the dataset to have three keys: - input_ids: from tokenizing formatted prompt - attention_mask: for masking padding. 
If you don’t add padding, it would be equal to len(input_ids) * [1] - labels: this is the same as input_ids, however, if you want to mask certain tokens, you would set those indices to -100.\n\n\n\n\n\n\nTip\n\n\n\nMake sure to add BOS/EOS tokens to your prompt and mask it appropriately.\n\n\nA config for this would look like:\ndatasets:\n  - path: A.jsonl\n    type:\n\n\n\n\n\n\nNote\n\n\n\ntype: is empty!\n\n\n\n\nTemplate Free Dataset\nWe reccomend this approach when you want granular control over the prompt formatting, special tokens, and masking, whilst letting Axolotl handle the tokenization. This is very useful if your dataset has unique prompts that differ across samples and where one single general template wouldn’t suffice.\nIn the example below, you could see that there is no proper structure. At the same time, it’s very flexible as there are no constraints on how your prompt can look.\n{\n    \"segments\": [\n        {\n            \"label\": true,\n            \"text\": \"<s>Hello\\n\"\n        },\n        {\n            \"label\": true,\n            \"text\": \"hi there!. \"\n        },\n        {\n            \"label\": false,\n            \"text\": \"goodbye \"\n        },\n        {\n            \"label\": true,\n            \"text\": \"farewell</s>\"\n        }\n    ]\n}\nEach prompt must be have a key called segments which is a list of { text, label }.\ndatasets:\n  - path: A.jsonl\n    type: input_output\n\n\nConversation Dataset\nconversation messages are a list of messages which usually contain a role and content key.\n\n\n\n\n\n\nTip\n\n\n\nFun fact: Axolotl synonymously refers to “chat” messages as conversation messages due to how FastChat initially used this term to build a widely used fastchat conversation method for formatting chat messages prior to the creation of chat_templates.\n\n\n\nWhat are chat_templates?\nThe current most popular and convenient method for inference is to use chat_templates for formatting prompts. 
Axolotl supports using chat_templates for training to ensure that the model performs in the same environment as in inference.\nHere’s a quick rundown on chat_template: A chat_template is a Jinja2 template which formats a list of messages into a prompt.\nAn example of a prompt formatted into a popular template called ChatML can be seen below:\nSingle prompt (pretty-printed):\n{\n    \"messages\": [\n        {\n            \"role\": \"user\",\n            \"content\": \"Hi\"\n        },\n        {\n            \"role\": \"assistant\",\n            \"content\": \"How can I help you?\"\n        },\n        {\n            \"role\": \"user\",\n            \"content\": \"Can you add 3+5?\"\n        },\n        {\n            \"role\": \"assistant\",\n            \"content\": \"The answer is 8.\"\n        }\n    ]\n}\nThe ChatML template is as follows:\n{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\\n' }}{% endif %}\nThe above prompt formatted into this template will result in:\n<|im_start|>user\nHi<|im_end|>\n<|im_start|>assistant\nHow can I help you?<|im_end|>\n<|im_start|>user\nCan you add 3+5?<|im_end|>\n<|im_start|>assistant\nThe answer is 8.<|im_end|>\nBy using delimiters (<|im_start|> and <|im_end|>), a prompt separates different speakers which helps the model identify which portion belongs to whom.\n\n\nCommon Conversation Dataset formats\nOlder conversation datasets with the following format are colloquially called sharegpt datasets.\n{\"conversations\": [{\"from\": \"...\", \"value\": \"...\"}]}\nNewer conversation datasets usually follow the OpenAI format.\n{\"messages\": [{\"role\": \"...\", \"content\": \"...\"}]}\nAxolotl supports both as well as allowing customization of any kind of key.\n\n\nChat Template Usage\nTo properly use 
this method, it is important to identify three things:\n\nWhich chat_template would you use?\nWhat are the keys in your dataset, and what are the possible roles? For example, in OpenAI format, the keys would be messages, role, and content, respectively, whereas the possible roles are system, user, and assistant.\nWhat do you want to mask? For instance, only assistant messages, only last message, or nothing.\n\n\nChoosing a chat_template\nThere are a lot of chat_templates out there. Axolotl supports the common ones: supported chat templates. For example, to use ChatML, it would be chat_template: chatml.\nHowever, it is also possible to use the already configured template within the tokenizer by specifying chat_template: tokenizer_default. If you want a fallback (in case some tokenizer does not have it pre-configured), you can do chat_template: tokenizer_default_fallback_chatml to fallback to the ChatML template if a tokenizer template was not found.\nOne last but powerful approach is to bring your own template. This can be set via:\nchat_template_jinja: # your template\n\n\nSetting chat_template dataset keys\nWe currently default to OpenAI format for dataset keys, so if that’s your current dataset format, there’s nothing to do here.\nIf your dataset format is different, here are the keys you should check (with their defaults):\ndatasets:\n    ...\n    field_messages: messages\n    message_field_role: role\n    message_field_content: content\nIn some chat_templates (e.g. Gemma), the roles are hardcoded to user and assistant. Consequently, you may find it necessary to map the roles in your dataset to these above. We currently have some defaults that should work for common datasets, but if you get a KeyError, it would be necessary to add mapping for your roles. 
Here is an example of how it would look like:\ndatasets:\n    ...\n    roles:\n      assistant:\n        - gpt\n        - model\n      user:\n        - human\nIn the example above, all gpt and model values are converted to assistant. All human values are converted to user.\n\n\nHandling masking\nThe common use case for chat_template is for chat messages, therefore, it is common to mask all non-assistant messages. Assistant messages refer to the bot messages that you want the model to learn on.\nTo train on all assistant messages, you would set the following configs.\ndatasets:\n    ...\n    roles_to_train: [\"assistant\"]\n    train_on_eos: \"turn\"\nThe train_on_eos config means that it would mask all EOS tokens for turns that aren’t assistant-turns. The other options are: all and last to choose which EOS to train on.\nPerhaps, you want to train on assistant and narrator roles, you can simply add narrator to the list of roles_to_train. You would also need to add it to the mapping of roles above.\ndatasets:\n    ...\n    roles_to_train: [\"assistant\", \"narrator\"]\n    roles:\n      assistant:\n        - gpt\n        - model\n      user:\n        - human\n      narrator: [\"narrator\"]\n\n\n\nApplying chat_template\nOnce all the above steps are completed, you could combine all these configs together to form a bespoke configuration for your custom dataset. 
The final step would be to correctly set the EOS token in your config:\ndatasets:\n  - path: A.jsonl\n    type: chat_template\n\n    # step 1\n    chat_template: chatml\n\n    # step 2\n    field_messages: messages\n    message_field_role: role\n    message_field_content: content\n\n    roles:\n      assistant:\n        - gpt\n        - model\n        - assistant\n      user:\n        - human\n        - user\n\n    # step 3\n    roles_to_train: [\"assistant\"]\n    train_on_eos: \"turn\"\n\nspecial_tokens:\n  eos_token: <|im_end|>\nIf this config were to be applied to the sample dataset above, the output would look as such (which can be retrieved via axolotl preprocess config.yaml --debug):\n<|im_start|>(-100, 128256) user(-100, 882)\n(-100, 198) Hi(-100, 13347) <|im_end|>(-100, 128257)\n(-100, 198) <|im_start|>(-100, 128256) assistant(-100, 78191)\n(-100, 198) How(4438, 4438)  can(649, 649)  I(358, 358)  help(1520, 1520)  you(499, 499) ?(30, 30) <|im_end|>(128257, 128257)\n(-100, 198) <|im_start|>(-100, 128256) user(-100, 882)\n(-100, 198) Can(-100, 6854)  you(-100, 499)  add(-100, 923)  (-100, 220) 3(-100, 18) +(-100, 10) 5(-100, 20) ?(-100, 30) <|im_end|>(-100, 128257)\n(-100, 198) <|im_start|>(-100, 128256) assistant(-100, 78191)\n(-100, 198) The(791, 791)  answer(4320, 4320)  is(374, 374)  (220, 220) 8(23, 23) .(13, 13) <|im_end|>(128257, 128257)\n(-100, 198)\nThe first number refers to the label, the second refers to the token_id. For example, -100 labels appear on non-assistant portions, meaning that they are masked during. For assistant portions, the label is the same as the token_id.\n\n\n\nInstruction Dataset\nInstruction datasets are used to train instruction-following models and comprise a prompt, containing an instruction, and a single response. 
In contrast to chat datasets which may be multi-turn, instruct datasets are typically single-turn.\nAn example is of a common format called Alpaca:\n{\"instruction\": \"...\", \"input\": \"...\", \"output\": \"...\"}\nUsing those keys, a prompt can be built based on it.\nBelow is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n{output}\nThis can be configured as such:\ndatasets:\n  - path: A.jsonl\n    type: alpaca\nAxolotl supports many kinds of instruction dataset. All of them can be found here (https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/inst_tune.html) with their respective type and sample row format.\n\nCustom Instruct Prompt Format\nDue to the myriad possibilities of instruction formats, Axolotl allows customizing your own instruction format without having to dive into the code directly.\nIn the example below, a sample row is used to output in mistral_v1 format.\n{\"input\": \"...\", \"output\": \"...\"}\ndatasets:\n  - path: repo\n    type:\n      system_prompt: \"\"\n\n      field_system:\n      field_instruction: input\n      field_input:\n      field_output: output\n\n      # multi-line example with input\n      format: |-\n        [INST] {instruction} {input} [/INST]\n\n      # single-line example without input\n      no_input_format: \"[INST] {instruction} [/INST]\"\nThe config sets that the field_instruction is actually named input, and the field_input is empty as we don’t have an input in this sample. Generally, instruction can be thought as the question to the model, and input as the additional information with output being the response. It is not necessary to have an input nor system. In the end, the most important part is to understand what format you want it to look like and how you can customize this to your use case.",
    +    "text": "Supervised fine-tuning (SFT)\nSupervised fine-tuning is the process of training models to respond to an instruction or chat input.\nAs there are a wide variety of dataset formats, Axolotl tries to support a majority of the formats available in public datasets.\nAxolotl provides four approaches for loading datasets, however, it’s easier to work backwards from the dataset you have available to figure out which approach to use.\nA flow chart is as follows:\n\nDo you already have the dataset tokenized? If yes, check Pre-Tokenized Dataset.\nDo you want to format the dataset yourself and manually choose each section to mask? If yes, check Template Free Dataset\nIs your dataset in a “conversation” format, containing a list[messages]? If yes, check Conversation Dataset\nIs your dataset in an “instruct” format, containing { instruction, response }? If yes, check Instruction Dataset\n\nIf you went through the flow chart and did not find one that matches, it is recommended to preprocess your dataset into one of the above or create a thread on Github Discussion.\n\n\n\n\n\n\nTip\n\n\n\nYou can mix and match within each approach or across approaches to train a model on a variety of datasets.\n\n\n\nPre-Tokenized Dataset\nWe suggest this approach when you want to bring your own tokenized dataset.\nAxolotl expects the dataset to have three keys: - input_ids: from tokenizing formatted prompt - attention_mask: for masking padding. 
If you don’t add padding, it would be equal to len(input_ids) * [1] - labels: this is the same as input_ids, however, if you want to mask certain tokens, you would set those indices to -100.\n\n\n\n\n\n\nTip\n\n\n\nMake sure to add BOS/EOS tokens to your prompt and mask it appropriately.\n\n\nA config for this would look like:\ndatasets:\n  - path: A.jsonl\n    type:\n\n\n\n\n\n\nNote\n\n\n\ntype: is empty!\n\n\n\n\nTemplate Free Dataset\nWe recommend this approach when you want granular control over the prompt formatting, special tokens, and masking, whilst letting Axolotl handle the tokenization. This is very useful if your dataset has unique prompts that differ across samples and where one single general template wouldn’t suffice.\nIn the example below, you could see that there is no proper structure. At the same time, it’s very flexible as there are no constraints on how your prompt can look.\n{\n    \"segments\": [\n        {\n            \"label\": true,\n            \"text\": \"<s>Hello\\n\"\n        },\n        {\n            \"label\": true,\n            \"text\": \"hi there!. \"\n        },\n        {\n            \"label\": false,\n            \"text\": \"goodbye \"\n        },\n        {\n            \"label\": true,\n            \"text\": \"farewell</s>\"\n        }\n    ]\n}\nEach prompt must have a key called segments which is a list of { text, label }.\ndatasets:\n  - path: A.jsonl\n    type: input_output\n\n\nConversation Dataset\nconversation messages are a list of messages which usually contain a role and content key.\n\n\n\n\n\n\nTip\n\n\n\nFun fact: Axolotl synonymously refers to “chat” messages as conversation messages due to how FastChat initially used this term to build a widely used fastchat conversation method for formatting chat messages prior to the creation of chat_templates.\n\n\n\nWhat are chat_templates?\nThe current most popular and convenient method for inference is to use chat_templates for formatting prompts. 
Axolotl supports using chat_templates for training to ensure that the model performs in the same environment as in inference.\nHere’s a quick rundown on chat_template: A chat_template is a Jinja2 template which formats a list of messages into a prompt.\nAn example of a prompt formatted into a popular template called ChatML can be seen below:\nSingle prompt (pretty-printed):\n{\n    \"messages\": [\n        {\n            \"role\": \"user\",\n            \"content\": \"Hi\"\n        },\n        {\n            \"role\": \"assistant\",\n            \"content\": \"How can I help you?\"\n        },\n        {\n            \"role\": \"user\",\n            \"content\": \"Can you add 3+5?\"\n        },\n        {\n            \"role\": \"assistant\",\n            \"content\": \"The answer is 8.\"\n        }\n    ]\n}\nThe ChatML template is as follows:\n{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\\n' }}{% endif %}\nThe above prompt formatted into this template will result in:\n<|im_start|>user\nHi<|im_end|>\n<|im_start|>assistant\nHow can I help you?<|im_end|>\n<|im_start|>user\nCan you add 3+5?<|im_end|>\n<|im_start|>assistant\nThe answer is 8.<|im_end|>\nBy using delimiters (<|im_start|> and <|im_end|>), a prompt separates different speakers which helps the model identify which portion belongs to whom.\n\n\nCommon Conversation Dataset formats\nOlder conversation datasets with the following format are colloquially called sharegpt datasets.\n{\"conversations\": [{\"from\": \"...\", \"value\": \"...\"}]}\nNewer conversation datasets usually follow the OpenAI format.\n{\"messages\": [{\"role\": \"...\", \"content\": \"...\"}]}\nAxolotl supports both as well as allowing customization of any kind of key.\n\n\nChat Template Usage\nTo properly use 
this method, it is important to identify three things:\n\nWhich chat_template would you use?\nWhat are the keys in your dataset, and what are the possible roles? For example, in OpenAI format, the keys would be messages, role, and content, respectively, whereas the possible roles are system, user, and assistant.\nWhat do you want to mask? For instance, only assistant messages, only last message, or nothing.\n\n\nChoosing a chat_template\nThere are a lot of chat_templates out there. Axolotl supports the common ones: supported chat templates. For example, to use ChatML, it would be chat_template: chatml.\nHowever, it is also possible to use the already configured template within the tokenizer by specifying chat_template: tokenizer_default. If you want a fallback (in case some tokenizer does not have it pre-configured), you can do chat_template: tokenizer_default_fallback_chatml to fallback to the ChatML template if a tokenizer template was not found.\nOne last but powerful approach is to bring your own template. This can be set via:\nchat_template_jinja: # your template\n\n\nSetting chat_template dataset keys\nWe currently default to OpenAI format for dataset keys, so if that’s your current dataset format, there’s nothing to do here.\nIf your dataset format is different, here are the keys you should check (with their defaults):\ndatasets:\n    ...\n    field_messages: messages  # this should point to the key containing the list of conversations\n    message_property_mappings:  # this is a mapping from keys in your dataset to keys in chat_template\n      role: role\n      content: content\nIn some chat_templates (e.g. Gemma), the roles are hardcoded to user and assistant. Consequently, you may find it necessary to map the roles in your dataset to these above. We currently have some defaults that should work for common datasets, but if you get a KeyError, it would be necessary to add mapping for your roles. 
Here is an example of how it would look like:\ndatasets:\n    ...\n    roles:\n      assistant:\n        - gpt\n        - model\n      user:\n        - human\nIn the example above, all gpt and model values are converted to assistant. All human values are converted to user.\n\n\nHandling masking\nThe common use case for chat_template is for chat messages, therefore, it is common to mask all non-assistant messages. Assistant messages refer to the bot messages that you want the model to learn on.\nTo train on all assistant messages, you would set the following configs.\ndatasets:\n    ...\n    roles_to_train: [\"assistant\"]\n    train_on_eos: \"turn\"\nThe train_on_eos config means that it would mask all EOS tokens for turns that aren’t assistant-turns. The other options are: all and last to choose which EOS to train on.\nPerhaps, you want to train on assistant and narrator roles, you can simply add narrator to the list of roles_to_train. You would also need to add it to the mapping of roles above.\ndatasets:\n    ...\n    roles_to_train: [\"assistant\", \"narrator\"]\n    roles:\n      assistant:\n        - gpt\n        - model\n      user:\n        - human\n      narrator: [\"narrator\"]\n\n\n\nApplying chat_template\nOnce all the above steps are completed, you could combine all these configs together to form a bespoke configuration for your custom dataset. 
The final step would be to correctly set the EOS token in your config:\ndatasets:\n  - path: A.jsonl\n    type: chat_template\n\n    # step 1\n    chat_template: chatml\n\n    # step 2\n    field_messages: messages\n    message_property_mappings:\n      role: role\n      content: content\n\n    roles:\n      assistant:\n        - gpt\n        - model\n        - assistant\n      user:\n        - human\n        - user\n\n    # step 3\n    roles_to_train: [\"assistant\"]\n    train_on_eos: \"turn\"\n\nspecial_tokens:\n  eos_token: <|im_end|>\nIf this config were to be applied to the sample dataset above, the output would look as such (which can be retrieved via axolotl preprocess config.yaml --debug):\n<|im_start|>(-100, 128256) user(-100, 882)\n(-100, 198) Hi(-100, 13347) <|im_end|>(-100, 128257)\n(-100, 198) <|im_start|>(-100, 128256) assistant(-100, 78191)\n(-100, 198) How(4438, 4438)  can(649, 649)  I(358, 358)  help(1520, 1520)  you(499, 499) ?(30, 30) <|im_end|>(128257, 128257)\n(-100, 198) <|im_start|>(-100, 128256) user(-100, 882)\n(-100, 198) Can(-100, 6854)  you(-100, 499)  add(-100, 923)  (-100, 220) 3(-100, 18) +(-100, 10) 5(-100, 20) ?(-100, 30) <|im_end|>(-100, 128257)\n(-100, 198) <|im_start|>(-100, 128256) assistant(-100, 78191)\n(-100, 198) The(791, 791)  answer(4320, 4320)  is(374, 374)  (220, 220) 8(23, 23) .(13, 13) <|im_end|>(128257, 128257)\n(-100, 198)\nThe first number refers to the label, the second refers to the token_id. For example, -100 labels appear on non-assistant portions, meaning that they are masked during training. For assistant portions, the label is the same as the token_id.\n\n\n\nInstruction Dataset\nInstruction datasets are used to train instruction-following models and comprise a prompt, containing an instruction, and a single response. 
In contrast to chat datasets which may be multi-turn, instruct datasets are typically single-turn.\nAn example of a common format is Alpaca:\n{\"instruction\": \"...\", \"input\": \"...\", \"output\": \"...\"}\nUsing those keys, a prompt can be built based on it.\nBelow is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n{output}\nThis can be configured as such:\ndatasets:\n  - path: A.jsonl\n    type: alpaca\nAxolotl supports many kinds of instruction dataset. All of them can be found here (https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/inst_tune.html) with their respective type and sample row format.\n\nCustom Instruct Prompt Format\nDue to the myriad possibilities of instruction formats, Axolotl allows customizing your own instruction format without having to dive into the code directly.\nIn the example below, a sample row is used to output in mistral_v1 format.\n{\"input\": \"...\", \"output\": \"...\"}\ndatasets:\n  - path: repo\n    type:\n      system_prompt: \"\"\n\n      field_system:\n      field_instruction: input\n      field_input:\n      field_output: output\n\n      # multi-line example with input\n      format: |-\n        [INST] {instruction} {input} [/INST]\n\n      # single-line example without input\n      no_input_format: \"[INST] {instruction} [/INST]\"\nThe config sets that the field_instruction is actually named input, and the field_input is empty as we don’t have an input in this sample. Generally, instruction can be thought of as the question to the model, and input as the additional information with output being the response. It is not necessary to have an input nor system. In the end, the most important part is to understand what format you want it to look like and how you can customize this to your use case.",
         "crumbs": [
           "Dataset Formats"
         ]
    @@ -1105,7 +1105,7 @@
         "href": "docs/dataset-formats/conversation.html#chat_template",
         "title": "Conversation",
         "section": "chat_template",
    -    "text": "chat_template\nChat Template strategy uses a jinja2 template that converts a list of messages into a prompt. Support using tokenizer’s template, a supported template, or custom jinja2.\n\n\ndata.jsonl\n\n{\"conversations\": [{\"role\": \"...\", \"content\": \"...\"}]}\n\nSee configs for full configs and supported templates.\n\nMigrating from sharegpt\nMost configs can be adapted as follows:\n# old\nchat_template: chatml\ndatasets:\n  - path: ...\n    type: sharegpt\n    conversation: chatml\n\n# new (if using tokenizer's chat_template)\ndatasets:\n  - path: ...\n    type: chat_template\n\n    field_messages: conversations\n    message_field_role: from\n    message_field_content: value\n\n# new (if setting a new chat_template like chatml, gemma, etc)\nchat_template: chatml\ndatasets:\n  - path: ...\n    type: chat_template\n\n    field_messages: conversations\n    message_field_role: from\n    message_field_content: value\nWe recommend checking the below examples for other usecases.\n\n\nExamples\n\nUsing the default chat template in the tokenizer_config.json on OpenAI messages format, training on only last message.\n\ndatasets:\n  - path: ...\n    type: chat_template\n    roles_to_train:\n    train_on_eos:\n\nUsing the gemma chat template to override the tokenizer_config.json’s chat template on OpenAI messages format, training on all assistant messages.\n\nchat_template: gemma # this overwrites the tokenizer's chat_template\ndatasets:\n  - path: ...\n    type: chat_template\n    roles_to_train: [\"assistant\"]  # default value\n\nUsing the tokenizer_config.json’s chat template or chatml as fallback if the former’s chat template does not exist, on OpenAI messages format, training on all assistant messages.\n\nchat_template: tokenizer_default_fallback_chatml # this overwrites the tokenizer's chat_template\ndatasets:\n  - path: ...\n    type: chat_template\n\nUsing a custom jinja template on OpenAI messages format, training on all assistant 
messages.\n\n# chat_template: jinja # `jinja` will be implied if the `chat_template_jinja` is set and this field is empty\nchat_template_jinja: \"{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'system') %}{{'<|system|>' + '\\n' + message['content'] + '<|end|>' + '\\n'}}{% elif (message['role'] == 'user') %}{{'<|user|>' + '\\n' + message['content'] + '<|end|>' + '\\n' + '<|assistant|>' + '\\n'}}{% elif message['role'] == 'assistant' %}{{message['content'] + '<|end|>' + '\\n'}}{% endif %}{% endfor %}\"\n\ndatasets:\n  - path: ...\n    type: chat_template\n\n(Advanced) Using fine-grained control over tokens and turns to train in a conversation\n\nFor a data sample that looks like:\n\n\ndata.jsonl\n\n{\n  \"conversations\": [\n    {\"from\": \"system\", \"value\": \"You are an AI assistant.\", \"train\": false},\n    {\"from\": \"human\", \"value\": \"Hello\", \"train\": false},\n    {\"from\": \"assistant\", \"value\": \"Hello\", \"train\": true},\n    {\"from\": \"human\", \"value\": \"How are you?\", \"train\": true},\n    {\n      \"from\": \"assistant\",\n      \"value\": \"I'm doing very well, thank you!\",\n      \"train_detail\": [\n        {\"begin_offset\": 0, \"end_offset\": 8, \"train\": false},\n        {\"begin_offset\": 9, \"end_offset\": 18, \"train\": true},\n        {\"begin_offset\": 19, \"end_offset\": 30, \"train\": false},\n      ],\n    },\n    {\n        \"from\": \"human\",\n        \"value\": \"I'm doing very well, thank you!\",\n        \"train\": true,\n    },\n    {\"from\": \"assistant\", \"value\": \"Hi there!\", \"train\": true}\n  ]\n}\n\nThe configuration would look like:\ndatasets:\n  - path: ...\n    type: chat_template\n    chat_template: tokenizer_default\n    field_messages: conversations\n    message_field_role: from\n    message_field_content: value\n    roles_to_train: []\n    train_on_eos: turn\n    message_field_training: train\n    message_field_training_detail: train_detail\nTip: It is not necessary 
to use both message_field_training and message_field_training_detail at the same time.",
    +    "text": "chat_template\nChat Template strategy uses a jinja2 template that converts a list of messages into a prompt. Support using tokenizer’s template, a supported template, or custom jinja2.\n\n\ndata.jsonl\n\n{\"conversations\": [{\"role\": \"...\", \"content\": \"...\"}]}\n\nSee configs for full configs and supported templates.\n\nMigrating from sharegpt\nMost configs can be adapted as follows:\n# old\nchat_template: chatml\ndatasets:\n  - path: ...\n    type: sharegpt\n    conversation: chatml\n\n# new (if using tokenizer's chat_template)\ndatasets:\n  - path: ...\n    type: chat_template\n\n    field_messages: conversations\n    message_property_mappings:\n      role: from\n      content: value\n\n# new (if setting a new chat_template like chatml, gemma, etc)\nchat_template: chatml\ndatasets:\n  - path: ...\n    type: chat_template\n\n    field_messages: conversations\n    message_property_mappings:\n      role: from\n      content: value\nWe recommend checking the below examples for other usecases.\n\n\nExamples\n\nUsing the default chat template in the tokenizer_config.json on OpenAI messages format, training on only last message.\n\ndatasets:\n  - path: ...\n    type: chat_template\n    roles_to_train:\n    train_on_eos:\n\nUsing the gemma chat template to override the tokenizer_config.json’s chat template on OpenAI messages format, training on all assistant messages.\n\nchat_template: gemma # this overwrites the tokenizer's chat_template\ndatasets:\n  - path: ...\n    type: chat_template\n    roles_to_train: [\"assistant\"]  # default value\n\nUsing the tokenizer_config.json’s chat template or chatml as fallback if the former’s chat template does not exist, on OpenAI messages format, training on all assistant messages.\n\nchat_template: tokenizer_default_fallback_chatml # this overwrites the tokenizer's chat_template\ndatasets:\n  - path: ...\n    type: chat_template\n\nUsing a custom jinja template on OpenAI messages format, training on all 
assistant messages.\n\n# chat_template: jinja # `jinja` will be implied if the `chat_template_jinja` is set and this field is empty\nchat_template_jinja: \"{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'system') %}{{'<|system|>' + '\\n' + message['content'] + '<|end|>' + '\\n'}}{% elif (message['role'] == 'user') %}{{'<|user|>' + '\\n' + message['content'] + '<|end|>' + '\\n' + '<|assistant|>' + '\\n'}}{% elif message['role'] == 'assistant' %}{{message['content'] + '<|end|>' + '\\n'}}{% endif %}{% endfor %}\"\n\ndatasets:\n  - path: ...\n    type: chat_template\n\n(Advanced) Using fine-grained control over tokens and turns to train in a conversation\n\nFor a data sample that looks like:\n\n\ndata.jsonl\n\n{\n  \"conversations\": [\n    {\"from\": \"system\", \"value\": \"You are an AI assistant.\", \"train\": false},\n    {\"from\": \"human\", \"value\": \"Hello\", \"train\": false},\n    {\"from\": \"assistant\", \"value\": \"Hello\", \"train\": true},\n    {\"from\": \"human\", \"value\": \"How are you?\", \"train\": true},\n    {\n      \"from\": \"assistant\",\n      \"value\": \"I'm doing very well, thank you!\",\n      \"train_detail\": [\n        {\"begin_offset\": 0, \"end_offset\": 8, \"train\": false},\n        {\"begin_offset\": 9, \"end_offset\": 18, \"train\": true},\n        {\"begin_offset\": 19, \"end_offset\": 30, \"train\": false},\n      ],\n    },\n    {\n        \"from\": \"human\",\n        \"value\": \"I'm doing very well, thank you!\",\n        \"train\": true,\n    },\n    {\"from\": \"assistant\", \"value\": \"Hi there!\", \"train\": true}\n  ]\n}\n\nThe configuration would look like:\ndatasets:\n  - path: ...\n    type: chat_template\n    chat_template: tokenizer_default\n    field_messages: conversations\n    message_property_mappings:\n      role: from\n      content: value\n    roles_to_train: []\n    train_on_eos: turn\n    message_field_training: train\n    message_field_training_detail: train_detail\nTip: It 
is not necessary to use both message_field_training and message_field_training_detail at the same time.",
         "crumbs": [
           "Dataset Formats",
           "Conversation"
    @@ -1255,7 +1255,7 @@
         "href": "docs/config.html",
         "title": "Config options",
         "section": "",
    -    "text": "# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files\n# This can also be a relative path to a model on disk\nbase_model: ./llama-7b-hf\n# You can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, etc)\nbase_model_ignore_patterns:\n# If the base_model repo on hf hub doesn't include configuration .json files,\n# You can set that here, or leave this empty to default to base_model\nbase_model_config: ./llama-7b-hf\n# You can specify to choose a specific model revision from huggingface hub\nrevision_of_model:\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one defined in the base model\ntokenizer_config:\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too\nmodel_type: AutoModelForCausalLM\n# Corresponding tokenizer for the model AutoTokenizer is a good choice\ntokenizer_type: AutoTokenizer\n# Trust remote code for untrusted source\ntrust_remote_code:\n# use_fast option for tokenizer loading from_pretrained, default to True\ntokenizer_use_fast:\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy:\n# Resize the model embeddings when new tokens are added to multiples of 32\n# This is reported to improve training speed on some models\nresize_token_embeddings_to_32x:\n\n# (Internal use only)\n# Used to identify which the model is based on\nis_falcon_derived_model:\nis_llama_derived_model:\nis_qwen_derived_model:\n# Please note that if you set this to true, `padding_side` will be set to \"left\" by default\nis_mistral_derived_model:\n\n# optional overrides to the base model configuration\noverrides_of_model_config:\n  # RoPE Scaling https://github.com/huggingface/transformers/pull/24653\n  rope_scaling:\n    type: # linear | dynamic\n    factor: # float\n\n# optional overrides the base model loading from_pretrained\noverrides_of_model_kwargs:\n  # use_cache: False\n\n# 
optional overrides to the bnb 4bit quantization configuration\n# https://huggingface.co/docs/transformers/main/main_classes/quantization#transformers.BitsAndBytesConfig\nbnb_config_kwargs:\n  # These are default values\n  llm_int8_has_fp16_weight: false\n  bnb_4bit_quant_type: nf4\n  bnb_4bit_use_double_quant: true\n\n\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: true\n\n# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer\nload_in_8bit: true\n# Use bitsandbytes 4 bit\nload_in_4bit:\n\n# Use CUDA bf16\nbf16: true # bool or 'full' for `bf16_full_eval`. require >=ampere\n# Use CUDA fp16\nfp16: true\n# Use CUDA tf32\ntf32: true # require >=ampere\n\n# No AMP (automatic mixed precision)\nbfloat16: true # require >=ampere\nfloat16: true\n\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in gigabytes); default: unset\ngpu_memory_limit: 20GiB\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge\nlora_on_cpu: true\n\n# A list of one or more datasets to finetune the model with\ndatasets:\n  # HuggingFace dataset repo | s3://,gs:// path | \"json\" for local dataset, make sure to fill data_files\n  - path: vicgalle/alpaca-gpt4\n    # The type of prompt to use for training. 
[alpaca, gpteacher, oasst, reflection]\n    type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn>\n    ds_type: # Optional[str] (json|arrow|parquet|text|csv) defines the datatype when path is a file\n    data_files: # Optional[str] path to source data files\n\n    shards: # Optional[int] split dataset into N pieces (use with shards_idx)\n    shards_idx: # Optional[int] = 0 the index of sharded dataset to use\n\n    preprocess_shards: # Optional[int] process dataset in N sequential chunks for memory efficiency (exclusive with `shards`)\n\n    name: # Optional[str] name of dataset configuration to load\n    train_on_split: train # Optional[str] name of dataset split to load from\n    revision: # Optional[str] The specific revision of the dataset to use when loading from the Hugging Face Hub. This can be a commit hash, tag, or branch name. If not specified, the latest version will be used. This parameter is ignored for local datasets.\n    trust_remote_code: # Optional[bool] Trust remote code for untrusted source\n\n  # Custom user instruction prompt\n  - path: repo\n    type:\n      # The below are defaults. 
only set what's needed if you use a different column name.\n      system_prompt: \"\"\n      system_format: \"{system}\"\n      field_system: system\n      field_instruction: instruction\n      field_input: input\n      field_output: output\n\n      # Customizable to be single line or multi-line\n      # Use {instruction}/{input} as key to be replaced\n      # 'format' can include {input}\n      format: |-\n        User: {instruction} {input}\n        Assistant:\n      # 'no_input_format' cannot include {input}\n      no_input_format: \"{instruction} \"\n\n      # For `completion` datasets only, uses the provided field instead of `text` column\n      field:\n\n  # Using chat template\n  - path: ...\n    # Set type to `chat_template` to use this strategy\n    type: chat_template\n    # Specify the name of the chat template to use\n    # The name of the chat template to use for training, following values are supported:\n    # - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default.\n    # - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n    # - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to if the tokenizer does not have a chat template else default to tokenizer. E.g. tokenizer_default_fallback_chatml.\n    # - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n    chat_template: tokenizer_default\n\n    # Custom jinja chat template. 
Used only if `chat_template: jinja` or empty.\n    chat_template_jinja:\n\n    # Key containing the messages (default: \"messages\")\n    field_messages: messages\n    # Key for role in each message (default: \"role\")\n    message_field_role: role\n    # Key for content in each message (default:  \"content\")\n    message_field_content: content\n\n    # Optional[Dict[str, List]]. Roles mapping in the messages. The default is:\n    roles:\n      user: [\"human\", \"user\"]\n      assistant: [\"gpt\", \"assistant\"]\n      system: [\"system\"]\n      tool: [\"tool\"]\n\n    # IMPORTANT: The following fields determine which parts of the conversation to train on.\n    # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train\n    # See examples at `docs/dataset-formats/conversation.qmd`\n    # Note: If the below 4 fields are empty, defaults to training only on the last message.\n\n    # Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss.\n    roles_to_train: [\"assistant\"]  # default\n    # Optional[str]. Which EOS tokens to train on in the conversation. Possible values are:\n    # - all: train on all EOS tokens\n    # - turn (default): train on the EOS token at the end of each trainable turn\n    # - last: train on the last EOS token in the conversation\n    train_on_eos: last\n    # The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`.\n    message_field_training: training\n    # The key in the message turn that contains the training details. 
Useful to selectively train on certain tokens in a turn.\n    # The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train).\n    message_field_training_detail: train_detail\n\n\n# If false, the datasets will not be shuffled and will keep their original order in `datasets`.\n# The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true.\nshuffle_merged_datasets: true\n\nDeduplicates datasets and test_datasets with identical entries.\ndataset_exact_deduplication: true\n\n# A list of one or more datasets to eval the model with.\n# You can use either test_datasets, or val_set_size, but not both.\ntest_datasets:\n  - path: /workspace/data/eval.jsonl\n    ds_type: json\n    # You need to specify a split. For \"json\" datasets the default split is called \"train\".\n    split: train\n    type: completion\n    data_files:\n      - /workspace/data/eval.jsonl\n\n# use RL training: 'dpo', 'ipo', 'kto'\nrl:\n# whether to perform weighting if doing DPO training. Boolean.\ndpo_use_weighting:\n\n# reward modelling: `True` or `False`\nreward_model:\n\n# process reward modelling: `True` or `False`\nprocess_reward_model:\n\n# The name of the chat template to use for training, following values are supported:\n# - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default value.\n# - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n# - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. tokenizer_default_fallback_chatml. 
This is useful when the chat template is not available in the tokenizer.\n# - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n# The selected chat template will be saved to the tokenizer_config.json for easier inferencing\n# Note: It is recommended to set train_on_inputs to true when using a chat template that is different from the model's default chat template.\nchat_template: tokenizer_default\n# custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null.\nchat_template_jinja: null\n# Changes the default system message\ndefault_system_message: You are a helpful assistant. Please give a long and detailed answer. # Currently only supports chatml.\n# Axolotl attempts to save the dataset as an arrow after packing the data together so\n# subsequent training attempts load faster, relative path\ndataset_prepared_path: data/last_run_prepared\n# Push prepared dataset to hub\npush_dataset_to_hub: # repo path\n# The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()`\n# if not set.\ndataset_processes: # defaults to os.cpu_count() if not set\n# Keep dataset in memory while preprocessing\n# Only needed if cached dataset is taking too much storage\ndataset_keep_in_memory:\n# push checkpoints to hub\nhub_model_id: # private repo path to push finetuned model\n# how to push checkpoints to hub\n# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy\nhub_strategy:\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets\n# Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: # boolean\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 
0 for no eval.\nval_set_size: 0.04\n# Num shards for whole dataset\ndataset_shard_num:\n# Index of shard to use for whole dataset\ndataset_shard_idx:\n\n# The maximum length of an input to train with, this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: 2048\n# Pad inputs so each step uses constant sized buffers\n# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently\npad_to_sequence_len:\n# Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true'\nsample_packing:\n# Set to 'false' if getting errors during eval with sample_packing on.\neval_sample_packing:\n# You can set these packing optimizations AFTER starting a training at least once.\n# The trainer will provide recommended values for these values.\nsample_packing_eff_est:\ntotal_num_tokens:\n# Increasing the following values helps with packing, but usually only slightly (<%1.)\n# The number of samples packed at a time.\nsample_packing_group_size: 100000\n# The number of samples which can be packed into one sequence. Increase if using a large sequence_len with many short samples.\nsample_packing_bin_size: 200\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation:\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening:\n\n# Passed through to transformers when loading the model when launched without accelerate\n# Use `sequential` when training w/ model parallelism to limit memory\ndevice_map:\n# Defines the max memory usage per gpu on the system. 
Passed through to transformers when loading the model.\nmax_memory:\n\n# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model\nadapter: lora\n# If you already have a lora model trained that you want to load, put that here.\n# This means after training, if you want to test the model, you should set this to the value of `output_dir`.\n# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir:\n\n# LoRA hyperparameters\n# For more details about the following options, see:\n# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2\nlora_r: 8\nlora_alpha: 16\nlora_dropout: 0.05\nlora_target_modules:\n  - q_proj\n  - v_proj\n#  - k_proj\n#  - o_proj\n#  - gate_proj\n#  - down_proj\n#  - up_proj\nlora_target_linear: # If true, will target all linear modules\npeft_layers_to_transform: # The layer indices to transform, otherwise, apply to all layers\n\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens.\n# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models.\n# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\n# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994\nlora_modules_to_save:\n#  - embed_tokens\n#  - lm_head\n\nlora_fan_in_fan_out: false\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for\n# speed and memory savings\n# See: https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html\nlora_mlp_kernel: true\nlora_qkv_kernel: true\nlora_o_kernel: true\n\n# LoRA+ hyperparameters\n# For more details about the following options, see:\n# https://arxiv.org/abs/2402.12354  and `src/axolotl/core/train_builder.py`\nloraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. 
Recommended value is 2^4.\nloraplus_lr_embedding: #  loraplus learning rate for lora embedding layers. Default value is 1e-6.\n\npeft:\n  # Configuration options for loftq initialization for LoRA\n  # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization\n  loftq_config:\n    loftq_bits:  # typically 4 bits\n\n# ReLoRA configuration\n# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed\nrelora_steps: # Number of steps per ReLoRA restart\nrelora_warmup_steps: # Number of per-restart warmup steps\nrelora_anneal_steps: # Number of anneal steps for each relora cycle\nrelora_prune_ratio: # threshold for optimizer magnitude when pruning\nrelora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings\n\n# wandb configuration if you're using it\n# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`.\nwandb_mode: # \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn off wandb\nwandb_project: # Your wandb project name\nwandb_entity: # A wandb Team name if using a Team\nwandb_watch:\nwandb_name: # Set the name of your wandb run\nwandb_run_id: # Set the ID of your wandb run\nwandb_log_model: # \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only at the end of training\n\n# mlflow configuration if you're using it\nmlflow_tracking_uri: # URI to mlflow\nmlflow_experiment_name: # Your experiment name\nmlflow_run_name: # Your run name\nhf_mlflow_log_artifacts:  # set to true to copy each saved checkpoint on each save to mlflow artifact registry\n\n# Comet configuration if you're using it\n# Make sure your `COMET_API_KEY` environment variable is set (recommended) or you login to Comet with `comet login`.\n# Check out our documentation for more details 
https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start\nuse_comet: # Enable or disable Comet integration.\ncomet_api_key: # API key for Comet. Recommended to set via `comet login`.\ncomet_workspace: # Workspace name in Comet. Defaults to the user's default workspace.\ncomet_project_name: # Project name in Comet. Defaults to Uncategorized.\ncomet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key.\ncomet_mode: # Create a new experiment (\"create\") or log to an existing one (\"get\"). Default (\"get_or_create\") auto-selects based on configuration.\ncomet_online: # Set to True to log data to Comet server, or False for offline storage. Default is True.\ncomet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details.\n\n# Tensorboard\nuse_tensorboard: # Optional[bool]\n\n# Where to save the full-finetuned model to\noutput_dir: ./completed-model\n\n# Whether to use torch.compile and which backend to use\n# setting to `auto` will enable torch compile when torch>=2.5.1\ntorch_compile:  # Optional[Union[Literal[\"auto\"], bool]]\ntorch_compile_backend:  # Optional[str]\n\n# Training hyperparameters\n\n# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps.\ngradient_accumulation_steps: 1\n# The number of samples to include in each batch. This is the number of samples sent to each GPU.\n# Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: 2\neval_batch_size:\nnum_epochs: 4\nwarmup_steps: 100  # cannot use with warmup_ratio\nwarmup_ratio: 0.05  # cannot use with warmup_steps\nlearning_rate: 0.00003\nlr_quadratic_warmup:\nlogging_steps:\neval_steps: # Leave empty to eval at each epoch, integer for every N steps. 
float for fraction of total steps\nevals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps\neval_strategy: # Set to `\"no\"` to skip evaluation, `\"epoch\"` at end of each epoch, leave empty to infer from `eval_steps`.\nsave_strategy: # Set to `\"no\"` to skip checkpoint saves, `\"epoch\"` at end of each epoch, `\"best\"` when better result is achieved, leave empty to infer from `save_steps`.\nsave_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps\nsaves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsave_total_limit: # Checkpoints saved at a time\n# Maximum number of iterations to train for. It precedes num_epochs which means that\n# if both are set, num_epochs will not be guaranteed.\n# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps:\n\n# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time.\ninclude_tokens_per_second:\n\neval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0\neval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128\neval_causal_lm_metrics: # HF evaluate metrics used during evaluation. 
Default is [\"sacrebleu\", \"comet\", \"ter\", \"chrf\", \"perplexity\"]\n\nprofiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir.\n                # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information\n                # snapshots can be visualized @ https://pytorch.org/memory_viz\n\nloss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training)\nloss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3)\n\n# Save model as safetensors (require safetensors package)\nsave_safetensors:\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: false\n# Group similarly sized data to minimize padding.\n# May be slower to start, as it must download and sort the entire dataset.\n# Note that training loss may have an oscillating pattern with this enabled.\ngroup_by_length: false\n\n# Whether to use gradient checkpointing https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: false\n# additional kwargs to pass to the trainer for gradient checkpointing\n# gradient_checkpointing_kwargs:\n#   use_reentrant: true\n\n# Stop training after this many evaluation losses have increased in a row\n# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback\nearly_stopping_patience: 3\n\n# Specify a scheduler and kwargs to use with the optimizer\nlr_scheduler: # 'one_cycle' | 'log_sweep' | empty for cosine\nlr_scheduler_kwargs:\ncosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr\ncosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. 
cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)\n\n# For one_cycle optim\nlr_div_factor: # Learning rate div factor\n\n# Specify optimizer\n# Valid values are driven by the Transformers OptimizerNames class, see:\n# https://github.com/huggingface/transformers/blob/95b374952dc27d8511541d6f5a4e22c9ec11fb24/src/transformers/training_args.py#L134\n#\n# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of\n# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. When in doubt, it is recommended to start with the optimizer used\n# in the examples/ for your model and fine-tuning use case.\n#\n# Valid values for 'optimizer' include:\n# - adamw_hf\n# - adamw_torch\n# - adamw_torch_fused\n# - adamw_torch_xla\n# - adamw_apex_fused\n# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1)\n# - adafactor\n# - adamw_anyprecision\n# - sgd\n# - adagrad\n# - adamw_bnb_8bit\n# - lion_8bit\n# - lion_32bit\n# - paged_adamw_32bit\n# - paged_adamw_8bit\n# - paged_lion_32bit\n# - paged_lion_8bit\n# - galore_adamw\n# - galore_adamw_8bit\n# - galore_adafactor\n# - galore_adamw_layerwise\n# - galore_adamw_8bit_layerwise\n# - galore_adafactor_layerwise\noptimizer:\n# Dictionary of arguments to pass to the optimizer\noptim_args:\n# For Galore Optimizers the following optim_args are available\n# rank:  # type: int\n# update_proj_gap  # type: int\n# scale  # type: float\n# proj_type:  # type: str, default = std\n\n# The target modules to optimize, i.e. 
the module names that you would like to train, right now this is used only for GaLore algorithm\noptim_target_modules:\n# - self_attn  # for llama\n# - mlp\n\n# Specify weight decay\nweight_decay:\n# adamw hyperparams\nadam_beta1:\nadam_beta2:\nadam_epsilon:\n# Gradient clipping max norm\nmax_grad_norm:\n\n# Augmentation techniques\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings\n# currently only supported on Llama and Mistral\nneftune_noise_alpha:\n\n# Whether to bettertransformers\nflash_optimum:\n# Whether to use xformers attention patch https://github.com/facebookresearch/xformers:\nxformers_attention:\n# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:\nflash_attention:\nflash_attn_cross_entropy:  # Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_rms_norm:  # Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_fuse_qkv: # Whether to fuse QKV into a single operation\nflash_attn_fuse_mlp: # Whether to fuse part of the MLP into a single operation\n# Whether to use scaled-dot-product attention\n# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention:\n# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention:\n# Resume from a specific checkpoint dir\nresume_from_checkpoint:\n# If resume_from_checkpoint isn't set and you simply want it to start where it left off.\n# Be careful with this being turned on between different models.\nauto_resume_from_checkpoints: false\n\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank:\n\n# Add or change special tokens.\n# If you add tokens here, you don't need to add them to the `tokens` list.\nspecial_tokens:\n  # bos_token: \"<s>\"\n  # eos_token: \"</s>\"\n  # unk_token: \"<unk>\"\n  # pad_token: \"[PAD]\"\n\n# Add extra tokens.\ntokens:\n\n# 
FSDP\nfsdp:\nfsdp_config:\n\n# Deepspeed config path. e.g., deepspeed_configs/zero3.json\ndeepspeed:\n\n# Advanced DDP Arguments\nddp_timeout:\nddp_bucket_cap_mb:\nddp_broadcast_buffers:\n\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path:\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize\npretraining_dataset:\n\n# Debug mode\ndebug:\n\n# Seed\nseed:\n\n# Allow overwrite yml config using from cli\nstrict:",
    +    "text": "# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files\n# This can also be a relative path to a model on disk\nbase_model: ./llama-7b-hf\n# You can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, etc)\nbase_model_ignore_patterns:\n# If the base_model repo on hf hub doesn't include configuration .json files,\n# You can set that here, or leave this empty to default to base_model\nbase_model_config: ./llama-7b-hf\n# You can specify to choose a specific model revision from huggingface hub\nrevision_of_model:\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one defined in the base model\ntokenizer_config:\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too\nmodel_type: AutoModelForCausalLM\n# Corresponding tokenizer for the model AutoTokenizer is a good choice\ntokenizer_type: AutoTokenizer\n# Trust remote code for untrusted source\ntrust_remote_code:\n# use_fast option for tokenizer loading from_pretrained, default to True\ntokenizer_use_fast:\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy:\n# Resize the model embeddings when new tokens are added to multiples of 32\n# This is reported to improve training speed on some models\nresize_token_embeddings_to_32x:\n\n# (Internal use only)\n# Used to identify which the model is based on\nis_falcon_derived_model:\nis_llama_derived_model:\nis_qwen_derived_model:\n# Please note that if you set this to true, `padding_side` will be set to \"left\" by default\nis_mistral_derived_model:\n\n# optional overrides to the base model configuration\noverrides_of_model_config:\n  # RoPE Scaling https://github.com/huggingface/transformers/pull/24653\n  rope_scaling:\n    type: # linear | dynamic\n    factor: # float\n\n# optional overrides the base model loading from_pretrained\noverrides_of_model_kwargs:\n  # use_cache: False\n\n# 
optional overrides to the bnb 4bit quantization configuration\n# https://huggingface.co/docs/transformers/main/main_classes/quantization#transformers.BitsAndBytesConfig\nbnb_config_kwargs:\n  # These are default values\n  llm_int8_has_fp16_weight: false\n  bnb_4bit_quant_type: nf4\n  bnb_4bit_use_double_quant: true\n\n\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: true\n\n# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer\nload_in_8bit: true\n# Use bitsandbytes 4 bit\nload_in_4bit:\n\n# Use CUDA bf16\nbf16: true # bool or 'full' for `bf16_full_eval`. require >=ampere\n# Use CUDA fp16\nfp16: true\n# Use CUDA tf32\ntf32: true # require >=ampere\n\n# No AMP (automatic mixed precision)\nbfloat16: true # require >=ampere\nfloat16: true\n\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in gigabytes); default: unset\ngpu_memory_limit: 20GiB\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge\nlora_on_cpu: true\n\n# A list of one or more datasets to finetune the model with\ndatasets:\n  # HuggingFace dataset repo | s3://,gs:// path | \"json\" for local dataset, make sure to fill data_files\n  - path: vicgalle/alpaca-gpt4\n    # The type of prompt to use for training. 
[alpaca, gpteacher, oasst, reflection]\n    type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn>\n    ds_type: # Optional[str] (json|arrow|parquet|text|csv) defines the datatype when path is a file\n    data_files: # Optional[str] path to source data files\n\n    shards: # Optional[int] split dataset into N pieces (use with shards_idx)\n    shards_idx: # Optional[int] = 0 the index of sharded dataset to use\n\n    preprocess_shards: # Optional[int] process dataset in N sequential chunks for memory efficiency (exclusive with `shards`)\n\n    name: # Optional[str] name of dataset configuration to load\n    train_on_split: train # Optional[str] name of dataset split to load from\n    revision: # Optional[str] The specific revision of the dataset to use when loading from the Hugging Face Hub. This can be a commit hash, tag, or branch name. If not specified, the latest version will be used. This parameter is ignored for local datasets.\n    trust_remote_code: # Optional[bool] Trust remote code for untrusted source\n\n  # Custom user instruction prompt\n  - path: repo\n    type:\n      # The below are defaults. 
only set what's needed if you use a different column name.\n      system_prompt: \"\"\n      system_format: \"{system}\"\n      field_system: system\n      field_instruction: instruction\n      field_input: input\n      field_output: output\n\n      # Customizable to be single line or multi-line\n      # Use {instruction}/{input} as key to be replaced\n      # 'format' can include {input}\n      format: |-\n        User: {instruction} {input}\n        Assistant:\n      # 'no_input_format' cannot include {input}\n      no_input_format: \"{instruction} \"\n\n      # For `completion` datsets only, uses the provided field instead of `text` column\n      field:\n\n  # Using chat template\n  - path: ...\n    # Set type to `chat_template` to use this strategy\n    type: chat_template\n    # Specify the name of the chat template to use\n    # The name of the chat template to use for training, following values are supported:\n    # - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default.\n    # - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n    # - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to if the tokenizer does not have a chat template else default to tokenizer. E.g. tokenizer_default_fallback_chatml.\n    # - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n    chat_template: tokenizer_default\n\n    # Custom jinja chat template. 
Used only if `chat_template: jinja` or empty.\n    chat_template_jinja:\n\n    # Key containing the messages (default: \"messages\")\n    field_messages: messages\n\n    # Mapping of properties from the input dataset to the chat template.\n    # (default: message_property_mappings={'role':'role', 'content':'content'})\n    # If a property exists in the template but not in this mapping, the system will attempt\n    # to load it directly from the message using the property name as the key.\n    # Example: In the mapping below, 'from' is loaded from input dataset and used as 'role',\n    # while 'value' is loaded and used as 'content' in the chat template.\n    message_property_mappings:\n      role: from\n      content: value\n      # ...\n\n    message_property_mappings:\n\n    # Optional[Dict[str, List]]. Roles mapping in the messages. The default is:\n    roles:\n      user: [\"human\", \"user\"]\n      assistant: [\"gpt\", \"assistant\"]\n      system: [\"system\"]\n      tool: [\"tool\"]\n\n    # IMPORTANT: The following fields determine which parts of the conversation to train on.\n    # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train\n    # See examples at `docs/dataset-formats/conversation.qmd`\n    # Note: If the below 4 fields are empty, defaults to training only on the last message.\n\n    # Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss.\n    roles_to_train: [\"assistant\"]  # default\n    # Optional[str]. Which EOS tokens to train on in the conversation. Possible values are:\n    # - all: train on all EOS tokens\n    # - turn (default): train on the EOS token at the end of each trainable turn\n    # - last: train on the last EOS token in the conversation\n    train_on_eos: last\n    # The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. 
Useful to selectively train on certain turns besides the `roles_to_train`.\n    message_field_training: training\n    # The key in the message turn that contains the training details. Useful to selectively train on certain tokens in a turn.\n    # The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train).\n    message_field_training_detail: train_detail\n\n\n# If false, the datasets will not be shuffled and will keep their original order in `datasets`.\n# The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true.\nshuffle_merged_datasets: true\n\nDeduplicates datasets and test_datasets with identical entries.\ndataset_exact_deduplication: true\n\n# A list of one or more datasets to eval the model with.\n# You can use either test_datasets, or val_set_size, but not both.\ntest_datasets:\n  - path: /workspace/data/eval.jsonl\n    ds_type: json\n    # You need to specify a split. For \"json\" datasets the default split is called \"train\".\n    split: train\n    type: completion\n    data_files:\n      - /workspace/data/eval.jsonl\n\n# use RL training: 'dpo', 'ipo', 'kto'\nrl:\n# whether to perform weighting if doing DPO training. Boolean.\ndpo_use_weighting:\n\n# reward modelling: `True` or `False`\nreward_model:\n\n# process reward modelling: `True` or `False`\nprocess_reward_model:\n\n# The name of the chat template to use for training, following values are supported:\n# - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. 
This is the default value.\n# - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n# - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not available in the tokenizer.\n# - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n# The selected chat template will be saved to the tokenizer_config.json for easier inferencing\n# Note: It is recommended to set train_on_inputs to true when using a chat template that is different from the model's default chat template.\nchat_template: tokenizer_default\n# custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null.\nchat_template_jinja: null\n# Changes the default system message\ndefault_system_message: You are a helpful assistant. Please give a long and detailed answer. # Currently only supports chatml.\n# Axolotl attempts to save the dataset as an arrow after packing the data together so\n# subsequent training attempts load faster, relative path\ndataset_prepared_path: data/last_run_prepared\n# Push prepared dataset to hub\npush_dataset_to_hub: # repo path\n# The maximum number of processes to use while preprocessing your input dataset. 
This defaults to `os.cpu_count()`\n# if not set.\ndataset_processes: # defaults to os.cpu_count() if not set\n# Keep dataset in memory while preprocessing\n# Only needed if cached dataset is taking too much storage\ndataset_keep_in_memory:\n# push checkpoints to hub\nhub_model_id: # private repo path to push finetuned model\n# how to push checkpoints to hub\n# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy\nhub_strategy:\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets\n# Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: # boolean\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval.\nval_set_size: 0.04\n# Num shards for whole dataset\ndataset_shard_num:\n# Index of shard to use for whole dataset\ndataset_shard_idx:\n\n# The maximum length of an input to train with, this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: 2048\n# Pad inputs so each step uses constant sized buffers\n# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently\npad_to_sequence_len:\n# Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true'\nsample_packing:\n# Set to 'false' if getting errors during eval with sample_packing on.\neval_sample_packing:\n# You can set these packing optimizations AFTER starting a training at least once.\n# The trainer will provide recommended values for these values.\nsample_packing_eff_est:\ntotal_num_tokens:\n# Increasing the following values helps with packing, but usually only slightly (<%1.)\n# The number of samples packed at a time.\nsample_packing_group_size: 100000\n# The number of samples which can be packed into one sequence. 
Increase if using a large sequence_len with many short samples.\nsample_packing_bin_size: 200\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation:\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening:\n\n# Passed through to transformers when loading the model when launched without accelerate\n# Use `sequential` when training w/ model parallelism to limit memory\ndevice_map:\n# Defines the max memory usage per gpu on the system. Passed through to transformers when loading the model.\nmax_memory:\n\n# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model\nadapter: lora\n# If you already have a lora model trained that you want to load, put that here.\n# This means after training, if you want to test the model, you should set this to the value of `output_dir`.\n# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir:\n\n# LoRA hyperparameters\n# For more details about the following options, see:\n# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2\nlora_r: 8\nlora_alpha: 16\nlora_dropout: 0.05\nlora_target_modules:\n  - q_proj\n  - v_proj\n#  - k_proj\n#  - o_proj\n#  - gate_proj\n#  - down_proj\n#  - up_proj\nlora_target_linear: # If true, will target all linear modules\npeft_layers_to_transform: # The layer indices to transform, otherwise, apply to all layers\n\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens.\n# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. 
It may vary for other models.\n# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\n# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994\nlora_modules_to_save:\n#  - embed_tokens\n#  - lm_head\n\nlora_fan_in_fan_out: false\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for\n# speed and memory savings\n# See: https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html\nlora_mlp_kernel: true\nlora_qkv_kernel: true\nlora_o_kernel: true\n\n# LoRA+ hyperparameters\n# For more details about the following options, see:\n# https://arxiv.org/abs/2402.12354  and `src/axolotl/core/train_builder.py`\nloraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.\nloraplus_lr_embedding: #  loraplus learning rate for lora embedding layers. Default value is 1e-6.\n\npeft:\n  # Configuration options for loftq initialization for LoRA\n  # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization\n  loftq_config:\n    loftq_bits:  # typically 4 bits\n\n# ReLoRA configuration\n# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed\nrelora_steps: # Number of steps per ReLoRA restart\nrelora_warmup_steps: # Number of per-restart warmup steps\nrelora_anneal_steps: # Number of anneal steps for each relora cycle\nrelora_prune_ratio: # threshold for optimizer magnitude when pruning\nrelora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings\n\n# wandb configuration if you're using it\n# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`.\nwandb_mode: # \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn off wandb\nwandb_project: # Your wandb project name\nwandb_entity: # A wandb Team name if using a Team\nwandb_watch:\nwandb_name: # Set the name of 
your wandb run\nwandb_run_id: # Set the ID of your wandb run\nwandb_log_model: # \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only at the end of training\n\n# mlflow configuration if you're using it\nmlflow_tracking_uri: # URI to mlflow\nmlflow_experiment_name: # Your experiment name\nmlflow_run_name: # Your run name\nhf_mlflow_log_artifacts:  # set to true to copy each saved checkpoint on each save to mlflow artifact registry\n\n# Comet configuration if you're using it\n# Make sure your `COMET_API_KEY` environment variable is set (recommended) or you login to Comet with `comet login`.\n# Check out our documentation for more details https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start\nuse_comet: # Enable or disable Comet integration.\ncomet_api_key: # API key for Comet. Recommended to set via `comet login`.\ncomet_workspace: # Workspace name in Comet. Defaults to the user's default workspace.\ncomet_project_name: # Project name in Comet. Defaults to Uncategorized.\ncomet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key.\ncomet_mode: # Create a new experiment (\"create\") or log to an existing one (\"get\"). Default (\"get_or_create\") auto-selects based on configuration.\ncomet_online: # Set to True to log data to Comet server, or False for offline storage. 
Default is True.\ncomet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details.\n\n# Tensorboard\nuse_tensorboard: # Optional[bool]\n\n# Where to save the full-finetuned model to\noutput_dir: ./completed-model\n\n# Whether to use torch.compile and which backend to use\n# setting to `auto` will enable torch compile when torch>=2.5.1\ntorch_compile:  # Optional[Union[Literal[\"auto\"], bool]]\ntorch_compile_backend:  # Optional[str]\n\n# Training hyperparameters\n\n# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps.\ngradient_accumulation_steps: 1\n# The number of samples to include in each batch. This is the number of samples sent to each GPU.\n# Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: 2\neval_batch_size:\nnum_epochs: 4\nwarmup_steps: 100  # cannot use with warmup_ratio\nwarmup_ratio: 0.05  # cannot use with warmup_steps\nlearning_rate: 0.00003\nlr_quadratic_warmup:\nlogging_steps:\neval_steps: # Leave empty to eval at each epoch, integer for every N steps. float for fraction of total steps\nevals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps\neval_strategy: # Set to `\"no\"` to skip evaluation, `\"epoch\"` at end of each epoch, leave empty to infer from `eval_steps`.\nsave_strategy: # Set to `\"no\"` to skip checkpoint saves, `\"epoch\"` at end of each epoch, `\"best\"` when better result is achieved, leave empty to infer from `save_steps`.\nsave_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps\nsaves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsave_total_limit: # Checkpoints saved at a time\n# Maximum number of iterations to train for. 
It precedes num_epochs which means that\n# if both are set, num_epochs will not be guaranteed.\n# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps:\n\n# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time.\ninclude_tokens_per_second:\n\neval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0\neval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128\neval_causal_lm_metrics: # HF evaluate metrics used during evaluation. Default is [\"sacrebleu\", \"comet\", \"ter\", \"chrf\", \"perplexity\"]\n\nprofiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir.\n                # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information\n                # snapshots can be visualized @ https://pytorch.org/memory_viz\n\nloss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training)\nloss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3)\n\n# Save model as safetensors (require safetensors package)\nsave_safetensors:\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: false\n# Group similarly sized data to minimize padding.\n# May be slower to start, as it must download and sort the entire dataset.\n# Note that training loss may have an oscillating pattern with this enabled.\ngroup_by_length: false\n\n# Whether to use gradient checkpointing https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: false\n# additional kwargs to pass to the trainer for gradient checkpointing\n# gradient_checkpointing_kwargs:\n#   use_reentrant: 
true\n\n# Stop training after this many evaluation losses have increased in a row\n# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback\nearly_stopping_patience: 3\n\n# Specify a scheduler and kwargs to use with the optimizer\nlr_scheduler: # 'one_cycle' | 'log_sweep' | empty for cosine\nlr_scheduler_kwargs:\ncosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr\ncosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)\n\n# For one_cycle optim\nlr_div_factor: # Learning rate div factor\n\n# Specify optimizer\n# Valid values are driven by the Transformers OptimizerNames class, see:\n# https://github.com/huggingface/transformers/blob/95b374952dc27d8511541d6f5a4e22c9ec11fb24/src/transformers/training_args.py#L134\n#\n# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of\n# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. 
When in doubt, it is recommended to start with the optimizer used\n# in the examples/ for your model and fine-tuning use case.\n#\n# Valid values for 'optimizer' include:\n# - adamw_hf\n# - adamw_torch\n# - adamw_torch_fused\n# - adamw_torch_xla\n# - adamw_apex_fused\n# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1)\n# - adafactor\n# - adamw_anyprecision\n# - sgd\n# - adagrad\n# - adamw_bnb_8bit\n# - lion_8bit\n# - lion_32bit\n# - paged_adamw_32bit\n# - paged_adamw_8bit\n# - paged_lion_32bit\n# - paged_lion_8bit\n# - galore_adamw\n# - galore_adamw_8bit\n# - galore_adafactor\n# - galore_adamw_layerwise\n# - galore_adamw_8bit_layerwise\n# - galore_adafactor_layerwise\noptimizer:\n# Dictionary of arguments to pass to the optimizer\noptim_args:\n# For Galore Optimizers the following optim_args are available\n# rank:  # type: int\n# update_proj_gap  # type: int\n# scale  # type: float\n# proj_type:  # type: str, default = std\n\n# The target modules to optimize, i.e. 
the module names that you would like to train, right now this is used only for GaLore algorithm\noptim_target_modules:\n# - self_attn  # for llama\n# - mlp\n\n# Specify weight decay\nweight_decay:\n# adamw hyperparams\nadam_beta1:\nadam_beta2:\nadam_epsilon:\n# Gradient clipping max norm\nmax_grad_norm:\n\n# Augmentation techniques\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings\n# currently only supported on Llama and Mistral\nneftune_noise_alpha:\n\n# Whether to bettertransformers\nflash_optimum:\n# Whether to use xformers attention patch https://github.com/facebookresearch/xformers:\nxformers_attention:\n# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:\nflash_attention:\nflash_attn_cross_entropy:  # Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_rms_norm:  # Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_fuse_qkv: # Whether to fuse QKV into a single operation\nflash_attn_fuse_mlp: # Whether to fuse part of the MLP into a single operation\n# Whether to use scaled-dot-product attention\n# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention:\n# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention:\n# Resume from a specific checkpoint dir\nresume_from_checkpoint:\n# If resume_from_checkpoint isn't set and you simply want it to start where it left off.\n# Be careful with this being turned on between different models.\nauto_resume_from_checkpoints: false\n\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank:\n\n# Add or change special tokens.\n# If you add tokens here, you don't need to add them to the `tokens` list.\nspecial_tokens:\n  # bos_token: \"<s>\"\n  # eos_token: \"</s>\"\n  # unk_token: \"<unk>\"\n  # pad_token: \"[PAD]\"\n\n# Add extra tokens.\ntokens:\n\n# 
FSDP\nfsdp:\nfsdp_config:\n\n# Deepspeed config path. e.g., deepspeed_configs/zero3.json\ndeepspeed:\n\n# Advanced DDP Arguments\nddp_timeout:\nddp_bucket_cap_mb:\nddp_broadcast_buffers:\n\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path:\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize\npretraining_dataset:\n\n# Debug mode\ndebug:\n\n# Seed\nseed:\n\n# Allow overwrite yml config using from cli\nstrict:",
         "crumbs": [
           "Reference",
           "Config options"
    diff --git a/sitemap.xml b/sitemap.xml
    index ee085fc39..ff9966a77 100644
    --- a/sitemap.xml
    +++ b/sitemap.xml
    @@ -2,158 +2,158 @@
     
       
         https://axolotl-ai-cloud.github.io/axolotl/index.html
    -    2025-02-17T20:46:17.543Z
    +    2025-02-18T02:59:39.617Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/rlhf.html
    -    2025-02-17T20:46:17.532Z
    +    2025-02-18T02:59:39.605Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/amd_hpc.html
    -    2025-02-17T20:46:17.528Z
    +    2025-02-18T02:59:39.601Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/faq.html
    -    2025-02-17T20:46:17.528Z
    +    2025-02-18T02:59:39.602Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/inference.html
    -    2025-02-17T20:46:17.531Z
    +    2025-02-18T02:59:39.605Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/unsloth.html
    -    2025-02-17T20:46:17.532Z
    +    2025-02-18T02:59:39.605Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/multimodal.html
    -    2025-02-17T20:46:17.531Z
    +    2025-02-18T02:59:39.605Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/input_output.html
    -    2025-02-17T20:46:17.531Z
    +    2025-02-18T02:59:39.605Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/index.html
    -    2025-02-17T20:46:17.528Z
    +    2025-02-18T02:59:39.601Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/template_free.html
    -    2025-02-17T20:46:17.528Z
    +    2025-02-18T02:59:39.601Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/inst_tune.html
    -    2025-02-17T20:46:17.528Z
    +    2025-02-18T02:59:39.601Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/stepwise_supervised.html
    -    2025-02-17T20:46:17.528Z
    +    2025-02-18T02:59:39.601Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/reward_modelling.html
    -    2025-02-17T20:46:17.532Z
    +    2025-02-18T02:59:39.605Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/fsdp_qlora.html
    -    2025-02-17T20:46:17.528Z
    +    2025-02-18T02:59:39.602Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/lr_groups.html
    -    2025-02-17T20:46:17.531Z
    +    2025-02-18T02:59:39.605Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/mac.html
    -    2025-02-17T20:46:17.531Z
    +    2025-02-18T02:59:39.605Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/dataset_preprocessing.html
    -    2025-02-17T20:46:17.528Z
    +    2025-02-18T02:59:39.602Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html
    -    2025-02-17T20:46:17.547Z
    +    2025-02-18T02:59:39.620Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/examples/colab-notebooks/colab-axolotl-example.html
    -    2025-02-17T20:46:17.532Z
    +    2025-02-18T02:59:39.606Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/FAQS.html
    -    2025-02-17T20:46:17.526Z
    +    2025-02-18T02:59:39.600Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/TODO.html
    -    2025-02-17T20:46:17.527Z
    +    2025-02-18T02:59:39.600Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/src/axolotl/integrations/kd/topk_logprob/LICENSE.html
    -    2025-02-17T20:46:17.547Z
    +    2025-02-18T02:59:39.621Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/src/axolotl/integrations/LICENSE.html
    -    2025-02-17T20:46:17.546Z
    +    2025-02-18T02:59:39.620Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/installation.html
    -    2025-02-17T20:46:17.531Z
    +    2025-02-18T02:59:39.605Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/torchao.html
    -    2025-02-17T20:46:17.532Z
    +    2025-02-18T02:59:39.605Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/ray-integration.html
    -    2025-02-17T20:46:17.531Z
    +    2025-02-18T02:59:39.605Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/cli.html
    -    2025-02-17T20:46:17.528Z
    +    2025-02-18T02:59:39.601Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/debugging.html
    -    2025-02-17T20:46:17.528Z
    +    2025-02-18T02:59:39.602Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/tokenized.html
    -    2025-02-17T20:46:17.528Z
    +    2025-02-18T02:59:39.601Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/pretraining.html
    -    2025-02-17T20:46:17.528Z
    +    2025-02-18T02:59:39.601Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/conversation.html
    -    2025-02-17T20:46:17.528Z
    +    2025-02-18T02:59:39.601Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/multi-gpu.html
    -    2025-02-17T20:46:17.531Z
    +    2025-02-18T02:59:39.605Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/batch_vs_grad.html
    -    2025-02-17T20:46:17.528Z
    +    2025-02-18T02:59:39.601Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/getting-started.html
    -    2025-02-17T20:46:17.528Z
    +    2025-02-18T02:59:39.602Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/config.html
    -    2025-02-17T20:46:17.528Z
    +    2025-02-18T02:59:39.601Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/multipack.html
    -    2025-02-17T20:46:17.531Z
    +    2025-02-18T02:59:39.605Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/multi-node.html
    -    2025-02-17T20:46:17.531Z
    +    2025-02-18T02:59:39.605Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/nccl.html
    -    2025-02-17T20:46:17.531Z
    +    2025-02-18T02:59:39.605Z
       
       
         https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html
    -    2025-02-17T20:46:17.531Z
    +    2025-02-18T02:59:39.605Z