diff --git a/.nojekyll b/.nojekyll index 753e13154..d6daecbd7 100644 --- a/.nojekyll +++ b/.nojekyll @@ -1 +1 @@ -1129f90d \ No newline at end of file +bb18e79f \ No newline at end of file diff --git a/docs/config.html b/docs/config.html index 49c36b285..6ce9632b4 100644 --- a/docs/config.html +++ b/docs/config.html @@ -533,546 +533,552 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin # Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge lora_on_cpu: true -# A list of one or more datasets to finetune the model with -datasets: - # HuggingFace dataset repo | s3://,gs:// path | "json" for local dataset, make sure to fill data_files - - path: vicgalle/alpaca-gpt4 - # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection] - type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn> - ds_type: # Optional[str] (json|arrow|parquet|text|csv) defines the datatype when path is a file - data_files: # Optional[str] path to source data files - - shards: # Optional[int] split dataset into N pieces (use with shards_idx) - shards_idx: # Optional[int] = 0 the index of sharded dataset to use - - preprocess_shards: # Optional[int] process dataset in N sequential chunks for memory efficiency (exclusive with `shards`) - - name: # Optional[str] name of dataset configuration to load - train_on_split: train # Optional[str] name of dataset split to load from - revision: # Optional[str] The specific revision of the dataset to use when loading from the Hugging Face Hub. This can be a commit hash, tag, or branch name. If not specified, the latest version will be used. This parameter is ignored for local datasets. - trust_remote_code: # Optional[bool] Trust remote code for untrusted source - - # Custom user instruction prompt - - path: repo - type: - # The below are defaults. only set what's needed if you use a different column name. - system_prompt: "" - system_format: "{system}" - field_system: system - field_instruction: instruction - field_input: input - field_output: output - - # Customizable to be single line or multi-line - # Use {instruction}/{input} as key to be replaced - # 'format' can include {input} - format: |- - User: {instruction} {input} - Assistant: - # 'no_input_format' cannot include {input} - no_input_format: "{instruction} " - - # For `completion` datsets only, uses the provided field instead of `text` column - field: - - # Using chat template - - path: ... - # Set type to `chat_template` to use this strategy - type: chat_template - # Specify the name of the chat template to use - # The name of the chat template to use for training, following values are supported: - # - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default. - # - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py - # - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to if the tokenizer does not have a chat template else default to tokenizer. E.g. tokenizer_default_fallback_chatml. - # - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field. 
- chat_template: tokenizer_default - - # Custom jinja chat template. Used only if `chat_template: jinja` or empty. - chat_template_jinja: - - # Key containing the messages (default: "messages") - field_messages: messages +# List[str]. Add plugins to extend the pipeline. +# See `src/axolotl/integrations` for the available plugins or doc below for more details. +# https://axolotl-ai-cloud.github.io/axolotl/docs/custom_integrations.html +plugins: + # - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin + +# A list of one or more datasets to finetune the model with +datasets: + # HuggingFace dataset repo | s3://,gs:// path | "json" for local dataset, make sure to fill data_files + - path: vicgalle/alpaca-gpt4 + # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection] + type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn> + ds_type: # Optional[str] (json|arrow|parquet|text|csv) defines the datatype when path is a file + data_files: # Optional[str] path to source data files + + shards: # Optional[int] split dataset into N pieces (use with shards_idx) + shards_idx: # Optional[int] = 0 the index of sharded dataset to use + + preprocess_shards: # Optional[int] process dataset in N sequential chunks for memory efficiency (exclusive with `shards`) + + name: # Optional[str] name of dataset configuration to load + train_on_split: train # Optional[str] name of dataset split to load from + revision: # Optional[str] The specific revision of the dataset to use when loading from the Hugging Face Hub. This can be a commit hash, tag, or branch name. If not specified, the latest version will be used. This parameter is ignored for local datasets. + trust_remote_code: # Optional[bool] Trust remote code for untrusted source + + # Custom user instruction prompt + - path: repo + type: + # The below are defaults. only set what's needed if you use a different column name. + system_prompt: "" + system_format: "{system}" + field_system: system + field_instruction: instruction + field_input: input + field_output: output + + # Customizable to be single line or multi-line + # Use {instruction}/{input} as key to be replaced + # 'format' can include {input} + format: |- + User: {instruction} {input} + Assistant: + # 'no_input_format' cannot include {input} + no_input_format: "{instruction} " + + # For `completion` datsets only, uses the provided field instead of `text` column + field: + + # Using chat template + - path: ... + # Set type to `chat_template` to use this strategy + type: chat_template + # Specify the name of the chat template to use + # The name of the chat template to use for training, following values are supported: + # - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default. + # - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py + # - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to if the tokenizer does not have a chat template else default to tokenizer. E.g. tokenizer_default_fallback_chatml. + # - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field. + chat_template: tokenizer_default - # Mapping of properties from the input dataset to the chat template. 
- # (default: message_property_mappings={'role':'role', 'content':'content'}) - # If a property exists in the template but not in this mapping, the system will attempt - # to load it directly from the message using the property name as the key. - # Example: In the mapping below, 'from' is loaded from input dataset and used as 'role', - # while 'value' is loaded and used as 'content' in the chat template. - message_property_mappings: - role: from - content: value - # ... - - # Optional[Dict[str, List]]. Roles mapping in the messages. The default is: - roles: - user: ["human", "user"] - assistant: ["gpt", "assistant"] - system: ["system"] - tool: ["tool"] - - # Optional[bool]. Whether to drop the system turn from the dataset. Only works with chat_template. - # This does not drop the default system message from chat_template if it exists. If you wish to, - # we recommend using a custom jinja template with the default system message removed or - # adding a system turn with empty content. - drop_system_message: + # Custom jinja chat template. Used only if `chat_template: jinja` or empty. + chat_template_jinja: + + # Key containing the messages (default: "messages") + field_messages: messages + + # Mapping of properties from the input dataset to the chat template. + # (default: message_property_mappings={'role':'role', 'content':'content'}) + # If a property exists in the template but not in this mapping, the system will attempt + # to load it directly from the message using the property name as the key. + # Example: In the mapping below, 'from' is loaded from input dataset and used as 'role', + # while 'value' is loaded and used as 'content' in the chat template. + message_property_mappings: + role: from + content: value + # ... + + # Optional[Dict[str, List]]. Roles mapping in the messages. The default is: + roles: + user: ["human", "user"] + assistant: ["gpt", "assistant"] + system: ["system"] + tool: ["tool"] - # IMPORTANT: The following fields determine which parts of the conversation to train on. - # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train - # See examples at `docs/dataset-formats/conversation.qmd` - # Note: If the below 4 fields are set to empty, defaults to training only on the last message. - - # Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss. - roles_to_train: ["assistant"] # default - # Optional[str]. Which EOS tokens to train on in the conversation. Possible values are: - # - all: train on all EOS tokens - # - turn (default): train on the EOS token at the end of each trainable turn - # - last: train on the last EOS token in the conversation - # TIP: Please make sure that your `tokenizer.eos_token` is same as EOS/EOT token in template. Otherwise, set `eos_token` under `special_tokens`. - train_on_eos: last - # The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`. - message_field_training: training - # The key in the message turn that contains the training details. Useful to selectively train on certain tokens in a turn. - # The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train). 
- message_field_training_detail: train_detail - - -# If false, the datasets will not be shuffled and will keep their original order in `datasets`. -# The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true. -shuffle_merged_datasets: true - -Deduplicates datasets and test_datasets with identical entries. -dataset_exact_deduplication: true - -# A list of one or more datasets to eval the model with. -# You can use either test_datasets, or val_set_size, but not both. -test_datasets: - - path: /workspace/data/eval.jsonl - ds_type: json - # You need to specify a split. For "json" datasets the default split is called "train". - split: train - type: completion - data_files: - - /workspace/data/eval.jsonl - -# use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo' -rl: -rl_beta: # Optional[float]. The beta parameter for the RL training. - -# dpo -dpo_use_weighting: # Optional[bool]. Whether to perform weighting. -rpo_alpha: # Optional[float]. Weighting of NLL term in loss from RPO paper. - -# orpo -orpo_alpha: 0.1 # Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to `beta` in `ORPOConfig` due to trl mapping. - -# kto -kto_desirable_weight: # Optional[float]. Factor for desirable loss term in KTO loss. -kto_undesirable_weight: # Optional[float]. Factor for undesirable loss term in KTO loss. - -# simpo -cpo_alpha: 1.0 # Weight of the BC regularizer -simpo_gamma: 0.5 # Target reward margin for the SimPO loss - -# grpo -trl: - use_vllm: # Optional[bool]. Whether to use VLLM for RL training. - vllm_device: # Optional[str]. Device to use for VLLM. - vllm_gpu_memory_utilization: # Optional[float]. GPU memory utilization for VLLM. - vllm_max_model_len: # Optional[int]. Maximum length of the model for VLLM. - vllm_dtype: # Optional[str]. Data type for VLLM. - - beta: # Optional[float]. Beta parameter for the RL training. Same as `rl_beta`. Use - max_completion_length: # Optional[int]. Maximum length of the completion for RL training. - - reward_funcs: # Optional[list[str]]. List of reward functions to load. Paths must be importable from current dir. - reward_weights: # Optional[list[float]]. List of reward weights for the reward functions. + # Optional[bool]. Whether to drop the system turn from the dataset. Only works with chat_template. + # This does not drop the default system message from chat_template if it exists. If you wish to, + # we recommend using a custom jinja template with the default system message removed or + # adding a system turn with empty content. + drop_system_message: + + # IMPORTANT: The following fields determine which parts of the conversation to train on. + # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train + # See examples at `docs/dataset-formats/conversation.qmd` + # Note: If the below 4 fields are set to empty, defaults to training only on the last message. + + # Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss. + roles_to_train: ["assistant"] # default + # Optional[str]. Which EOS tokens to train on in the conversation. Possible values are: + # - all: train on all EOS tokens + # - turn (default): train on the EOS token at the end of each trainable turn + # - last: train on the last EOS token in the conversation + # TIP: Please make sure that your `tokenizer.eos_token` is same as EOS/EOT token in template. Otherwise, set `eos_token` under `special_tokens`. 
+ train_on_eos: last + # The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`. + message_field_training: training + # The key in the message turn that contains the training details. Useful to selectively train on certain tokens in a turn. + # The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train). + message_field_training_detail: train_detail + + +# If false, the datasets will not be shuffled and will keep their original order in `datasets`. +# The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true. +shuffle_merged_datasets: true + +Deduplicates datasets and test_datasets with identical entries. +dataset_exact_deduplication: true + +# A list of one or more datasets to eval the model with. +# You can use either test_datasets, or val_set_size, but not both. +test_datasets: + - path: /workspace/data/eval.jsonl + ds_type: json + # You need to specify a split. For "json" datasets the default split is called "train". + split: train + type: completion + data_files: + - /workspace/data/eval.jsonl + +# use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo' +rl: +rl_beta: # Optional[float]. The beta parameter for the RL training. + +# dpo +dpo_use_weighting: # Optional[bool]. Whether to perform weighting. +rpo_alpha: # Optional[float]. Weighting of NLL term in loss from RPO paper. + +# orpo +orpo_alpha: 0.1 # Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to `beta` in `ORPOConfig` due to trl mapping. + +# kto +kto_desirable_weight: # Optional[float]. Factor for desirable loss term in KTO loss. +kto_undesirable_weight: # Optional[float]. Factor for undesirable loss term in KTO loss. + +# simpo +cpo_alpha: 1.0 # Weight of the BC regularizer +simpo_gamma: 0.5 # Target reward margin for the SimPO loss + +# grpo +trl: + use_vllm: # Optional[bool]. Whether to use VLLM for RL training. + vllm_device: # Optional[str]. Device to use for VLLM. + vllm_gpu_memory_utilization: # Optional[float]. GPU memory utilization for VLLM. + vllm_max_model_len: # Optional[int]. Maximum length of the model for VLLM. + vllm_dtype: # Optional[str]. Data type for VLLM. - num_generations: # Optional[int]. Number of generations to sample. - log_completions: # Optional[bool]. Whether to log completions. + beta: # Optional[float]. Beta parameter for the RL training. Same as `rl_beta`. Use + max_completion_length: # Optional[int]. Maximum length of the completion for RL training. - sync_ref_model: # Optional[bool]. Whether to sync the reference model. - ref_model_mixup_alpha: # Optional[float]. Mixup alpha for the reference model. - ref_model_sync_steps: # Optional[int]. Sync steps for the reference model. - - -# reward modelling: `True` or `False` -reward_model: - -# process reward modelling: `True` or `False` -process_reward_model: + reward_funcs: # Optional[list[str]]. List of reward functions to load. Paths must be importable from current dir. + reward_weights: # Optional[list[float]]. List of reward weights for the reward functions. + + num_generations: # Optional[int]. Number of generations to sample. + log_completions: # Optional[bool]. Whether to log completions. + + sync_ref_model: # Optional[bool]. Whether to sync the reference model. + ref_model_mixup_alpha: # Optional[float]. 
Mixup alpha for the reference model. + ref_model_sync_steps: # Optional[int]. Sync steps for the reference model. + -# The name of the chat template to use for training, following values are supported: -# - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default value. -# - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py -# - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not available in the tokenizer. -# - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field. -# The selected chat template will be saved to the tokenizer_config.json for easier inferencing -# Note: It is recommended to set train_on_inputs to true when using a chat template that is different from the model's default chat template. -chat_template: tokenizer_default -# custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null. -chat_template_jinja: null -# Changes the default system message. Currently only supports chatml. -default_system_message: You are a helpful assistant. Please give a long and detailed answer. -# Axolotl attempts to save the dataset as an arrow after packing the data together so -# subsequent training attempts load faster, relative path -dataset_prepared_path: data/last_run_prepared -# Push prepared dataset to hub -push_dataset_to_hub: # Optional[str] repo_org/repo_name -# The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()` -# if not set. -dataset_processes: # defaults to os.cpu_count() if not set -# Keep dataset in memory while preprocessing -# Only needed if cached dataset is taking too much storage -dataset_keep_in_memory: -# push checkpoints to hub -hub_model_id: # private repo path to push finetuned model -# how to push checkpoints to hub -# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy -hub_strategy: -# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets -# Required to be true when used in combination with `push_dataset_to_hub` -hf_use_auth_token: # boolean -# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval. -val_set_size: 0.04 -# Num shards for whole dataset -dataset_shard_num: -# Index of shard to use for whole dataset -dataset_shard_idx: - -# The maximum length of an input to train with, this should typically be less than 2048 -# as most models have a token/context limit of 2048 -sequence_len: 2048 -# Pad inputs so each step uses constant sized buffers -# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently -pad_to_sequence_len: -# Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true' -sample_packing: -# Set to 'false' if getting errors during eval with sample_packing on. -eval_sample_packing: -# You can set these packing optimizations AFTER starting a training at least once. 
-# The trainer will provide recommended values for these values. -sample_packing_eff_est: -total_num_tokens: -# Increasing the following values helps with packing, but usually only slightly (<%1.) -# The number of samples packed at a time. -sample_packing_group_size: 100000 -# The number of samples which can be packed into one sequence. Increase if using a large sequence_len with many short samples. -sample_packing_bin_size: 200 -# whether to concatenate samples during pretraining -pretraining_sample_concatenation: - -# Use batch flattening for speedups when not using sample_packing -batch_flattening: - -# Passed through to transformers when loading the model when launched without accelerate -# Use `sequential` when training w/ model parallelism to limit memory -device_map: -# Defines the max memory usage per gpu on the system. Passed through to transformers when loading the model. -max_memory: +# reward modelling: `True` or `False` +reward_model: + +# process reward modelling: `True` or `False` +process_reward_model: + +# The name of the chat template to use for training, following values are supported: +# - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default value. +# - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py +# - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not available in the tokenizer. +# - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field. +# The selected chat template will be saved to the tokenizer_config.json for easier inferencing +# Note: It is recommended to set train_on_inputs to true when using a chat template that is different from the model's default chat template. +chat_template: tokenizer_default +# custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null. +chat_template_jinja: null +# Changes the default system message. Currently only supports chatml. +default_system_message: You are a helpful assistant. Please give a long and detailed answer. +# Axolotl attempts to save the dataset as an arrow after packing the data together so +# subsequent training attempts load faster, relative path +dataset_prepared_path: data/last_run_prepared +# Push prepared dataset to hub +push_dataset_to_hub: # Optional[str] repo_org/repo_name +# The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()` +# if not set. +dataset_processes: # defaults to os.cpu_count() if not set +# Keep dataset in memory while preprocessing +# Only needed if cached dataset is taking too much storage +dataset_keep_in_memory: +# push checkpoints to hub +hub_model_id: # private repo path to push finetuned model +# how to push checkpoints to hub +# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy +hub_strategy: +# Whether to use hf `use_auth_token` for loading datasets. 
Useful for fetching private datasets +# Required to be true when used in combination with `push_dataset_to_hub` +hf_use_auth_token: # boolean +# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval. +val_set_size: 0.04 +# Num shards for whole dataset +dataset_shard_num: +# Index of shard to use for whole dataset +dataset_shard_idx: + +# The maximum length of an input to train with, this should typically be less than 2048 +# as most models have a token/context limit of 2048 +sequence_len: 2048 +# Pad inputs so each step uses constant sized buffers +# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently +pad_to_sequence_len: +# Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true' +sample_packing: +# Set to 'false' if getting errors during eval with sample_packing on. +eval_sample_packing: +# You can set these packing optimizations AFTER starting a training at least once. +# The trainer will provide recommended values for these values. +sample_packing_eff_est: +total_num_tokens: +# Increasing the following values helps with packing, but usually only slightly (<%1.) +# The number of samples packed at a time. +sample_packing_group_size: 100000 +# The number of samples which can be packed into one sequence. Increase if using a large sequence_len with many short samples. +sample_packing_bin_size: 200 +# whether to concatenate samples during pretraining +pretraining_sample_concatenation: + +# Use batch flattening for speedups when not using sample_packing +batch_flattening: -# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model -adapter: lora -# If you already have a lora model trained that you want to load, put that here. -# This means after training, if you want to test the model, you should set this to the value of `output_dir`. -# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`. -lora_model_dir: - -# LoRA hyperparameters -# For more details about the following options, see: -# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2 -lora_r: 8 -lora_alpha: 16 -lora_dropout: 0.05 -lora_target_modules: - - q_proj - - v_proj -# - k_proj -# - o_proj -# - gate_proj -# - down_proj -# - up_proj -lora_target_linear: # If true, will target all linear modules -peft_layers_to_transform: # The layer indices to transform, otherwise, apply to all layers - -# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens. -# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models. -# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities. -# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994 -lora_modules_to_save: -# - embed_tokens -# - lm_head - -lora_fan_in_fan_out: false - -# Apply custom LoRA autograd functions and activation function Triton kernels for -# speed and memory savings -# See: https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html -lora_mlp_kernel: true -lora_qkv_kernel: true -lora_o_kernel: true - -# LoRA+ hyperparameters -# For more details about the following options, see: -# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/train_builder.py` -loraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. 
Recommended value is 2^4. -loraplus_lr_embedding: # loraplus learning rate for lora embedding layers. Default value is 1e-6. +# Passed through to transformers when loading the model when launched without accelerate +# Use `sequential` when training w/ model parallelism to limit memory +device_map: +# Defines the max memory usage per gpu on the system. Passed through to transformers when loading the model. +max_memory: + +# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model +adapter: lora +# If you already have a lora model trained that you want to load, put that here. +# This means after training, if you want to test the model, you should set this to the value of `output_dir`. +# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`. +lora_model_dir: + +# LoRA hyperparameters +# For more details about the following options, see: +# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2 +lora_r: 8 +lora_alpha: 16 +lora_dropout: 0.05 +lora_target_modules: + - q_proj + - v_proj +# - k_proj +# - o_proj +# - gate_proj +# - down_proj +# - up_proj +lora_target_linear: # If true, will target all linear modules +peft_layers_to_transform: # The layer indices to transform, otherwise, apply to all layers + +# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens. +# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models. +# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities. +# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994 +lora_modules_to_save: +# - embed_tokens +# - lm_head + +lora_fan_in_fan_out: false + +# Apply custom LoRA autograd functions and activation function Triton kernels for +# speed and memory savings +# See: https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html +lora_mlp_kernel: true +lora_qkv_kernel: true +lora_o_kernel: true -peft: - # Configuration options for loftq initialization for LoRA - # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization - loftq_config: - loftq_bits: # typically 4 bits +# LoRA+ hyperparameters +# For more details about the following options, see: +# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/train_builder.py` +loraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4. +loraplus_lr_embedding: # loraplus learning rate for lora embedding layers. Default value is 1e-6. -# ReLoRA configuration -# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed -relora_steps: # Number of steps per ReLoRA restart -relora_warmup_steps: # Number of per-restart warmup steps -relora_anneal_steps: # Number of anneal steps for each relora cycle -relora_prune_ratio: # threshold for optimizer magnitude when pruning -relora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings - -# wandb configuration if you're using it -# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`. 
-wandb_mode: # "offline" to save run metadata locally and not sync to the server, "disabled" to turn off wandb -wandb_project: # Your wandb project name -wandb_entity: # A wandb Team name if using a Team -wandb_watch: -wandb_name: # Set the name of your wandb run -wandb_run_id: # Set the ID of your wandb run -wandb_log_model: # "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only at the end of training - -# mlflow configuration if you're using it -mlflow_tracking_uri: # URI to mlflow -mlflow_experiment_name: # Your experiment name -mlflow_run_name: # Your run name -hf_mlflow_log_artifacts: # set to true to copy each saved checkpoint on each save to mlflow artifact registry +peft: + # Configuration options for loftq initialization for LoRA + # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization + loftq_config: + loftq_bits: # typically 4 bits + +# ReLoRA configuration +# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed +relora_steps: # Number of steps per ReLoRA restart +relora_warmup_steps: # Number of per-restart warmup steps +relora_anneal_steps: # Number of anneal steps for each relora cycle +relora_prune_ratio: # threshold for optimizer magnitude when pruning +relora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings + +# wandb configuration if you're using it +# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`. +wandb_mode: # "offline" to save run metadata locally and not sync to the server, "disabled" to turn off wandb +wandb_project: # Your wandb project name +wandb_entity: # A wandb Team name if using a Team +wandb_watch: +wandb_name: # Set the name of your wandb run +wandb_run_id: # Set the ID of your wandb run +wandb_log_model: # "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only at the end of training -# Comet configuration if you're using it -# Make sure your `COMET_API_KEY` environment variable is set (recommended) or you login to Comet with `comet login`. -# Check out our documentation for more details https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start -use_comet: # Enable or disable Comet integration. -comet_api_key: # API key for Comet. Recommended to set via `comet login`. -comet_workspace: # Workspace name in Comet. Defaults to the user's default workspace. -comet_project_name: # Project name in Comet. Defaults to Uncategorized. -comet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key. -comet_mode: # Create a new experiment ("create") or log to an existing one ("get"). Default ("get_or_create") auto-selects based on configuration. -comet_online: # Set to True to log data to Comet server, or False for offline storage. Default is True. -comet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details. 
- -# Tensorboard -use_tensorboard: # Optional[bool] - -# Where to save the full-finetuned model to -output_dir: ./completed-model +# mlflow configuration if you're using it +mlflow_tracking_uri: # URI to mlflow +mlflow_experiment_name: # Your experiment name +mlflow_run_name: # Your run name +hf_mlflow_log_artifacts: # set to true to copy each saved checkpoint on each save to mlflow artifact registry + +# Comet configuration if you're using it +# Make sure your `COMET_API_KEY` environment variable is set (recommended) or you login to Comet with `comet login`. +# Check out our documentation for more details https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start +use_comet: # Enable or disable Comet integration. +comet_api_key: # API key for Comet. Recommended to set via `comet login`. +comet_workspace: # Workspace name in Comet. Defaults to the user's default workspace. +comet_project_name: # Project name in Comet. Defaults to Uncategorized. +comet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key. +comet_mode: # Create a new experiment ("create") or log to an existing one ("get"). Default ("get_or_create") auto-selects based on configuration. +comet_online: # Set to True to log data to Comet server, or False for offline storage. Default is True. +comet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details. -# Whether to use torch.compile and which backend to use -# setting to `auto` will enable torch compile when torch>=2.5.1 -torch_compile: # Optional[Union[Literal["auto"], bool]] -torch_compile_backend: # Optional[str] - -# Training hyperparameters - -# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps. -gradient_accumulation_steps: 1 -# The number of samples to include in each batch. This is the number of samples sent to each GPU. -# Batch size per gpu = micro_batch_size * gradient_accumulation_steps -micro_batch_size: 2 -eval_batch_size: -num_epochs: 4 -warmup_steps: 100 # cannot use with warmup_ratio -warmup_ratio: 0.05 # cannot use with warmup_steps -learning_rate: 0.00003 -lr_quadratic_warmup: -logging_steps: -eval_steps: # Leave empty to eval at each epoch, integer for every N steps. float for fraction of total steps -evals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps -eval_strategy: # Set to `"no"` to skip evaluation, `"epoch"` at end of each epoch, leave empty to infer from `eval_steps`. -save_strategy: # Set to `"no"` to skip checkpoint saves, `"epoch"` at end of each epoch, `"best"` when better result is achieved, leave empty to infer from `save_steps`. -save_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps -saves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps -save_total_limit: # Checkpoints saved at a time -# Maximum number of iterations to train for. It precedes num_epochs which means that -# if both are set, num_epochs will not be guaranteed. -# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps -max_steps: - -# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time. 
-include_tokens_per_second: # Optional[bool] - -# whether to find batch size that fits in memory. Passed to underlying transformers Trainer -auto_find_batch_size: # Optional[bool] +# Tensorboard +use_tensorboard: # Optional[bool] + +# Where to save the full-finetuned model to +output_dir: ./completed-model + +# Whether to use torch.compile and which backend to use +# setting to `auto` will enable torch compile when torch>=2.5.1 +torch_compile: # Optional[Union[Literal["auto"], bool]] +torch_compile_backend: # Optional[str] + +# Training hyperparameters + +# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps. +gradient_accumulation_steps: 1 +# The number of samples to include in each batch. This is the number of samples sent to each GPU. +# Batch size per gpu = micro_batch_size * gradient_accumulation_steps +micro_batch_size: 2 +eval_batch_size: +num_epochs: 4 +warmup_steps: 100 # cannot use with warmup_ratio +warmup_ratio: 0.05 # cannot use with warmup_steps +learning_rate: 0.00003 +lr_quadratic_warmup: +logging_steps: +eval_steps: # Leave empty to eval at each epoch, integer for every N steps. float for fraction of total steps +evals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps +eval_strategy: # Set to `"no"` to skip evaluation, `"epoch"` at end of each epoch, leave empty to infer from `eval_steps`. +save_strategy: # Set to `"no"` to skip checkpoint saves, `"epoch"` at end of each epoch, `"best"` when better result is achieved, leave empty to infer from `save_steps`. +save_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps +saves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps +save_total_limit: # Checkpoints saved at a time +# Maximum number of iterations to train for. It precedes num_epochs which means that +# if both are set, num_epochs will not be guaranteed. +# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps +max_steps: -eval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0 -eval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128 -eval_causal_lm_metrics: # HF evaluate metrics used during evaluation. Default is ["sacrebleu", "comet", "ter", "chrf", "perplexity"] - -profiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir. - # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information - # snapshots can be visualized @ https://pytorch.org/memory_viz - -loss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training) -loss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3) - -# Save model as safetensors (require safetensors package) -save_safetensors: +# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time. +include_tokens_per_second: # Optional[bool] + +# whether to find batch size that fits in memory. Passed to underlying transformers Trainer +auto_find_batch_size: # Optional[bool] + +eval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. 
Default is 0 +eval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128 +eval_causal_lm_metrics: # HF evaluate metrics used during evaluation. Default is ["sacrebleu", "comet", "ter", "chrf", "perplexity"] + +profiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir. + # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information + # snapshots can be visualized @ https://pytorch.org/memory_viz -# Whether to mask out or include the human's prompt from the training labels -train_on_inputs: false -# Group similarly sized data to minimize padding. -# May be slower to start, as it must download and sort the entire dataset. -# Note that training loss may have an oscillating pattern with this enabled. -group_by_length: false - -# Whether to use gradient checkpointing https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing -gradient_checkpointing: false -# additional kwargs to pass to the trainer for gradient checkpointing -# gradient_checkpointing_kwargs: -# use_reentrant: true +loss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training) +loss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3) + +# Save model as safetensors (require safetensors package) +save_safetensors: + +# Whether to mask out or include the human's prompt from the training labels +train_on_inputs: false +# Group similarly sized data to minimize padding. +# May be slower to start, as it must download and sort the entire dataset. +# Note that training loss may have an oscillating pattern with this enabled. +group_by_length: false -# Stop training after this many evaluation losses have increased in a row -# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback -early_stopping_patience: 3 - -# Specify a scheduler and kwargs to use with the optimizer -lr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | empty for cosine -lr_scheduler_kwargs: -cosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr -cosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf) +# Whether to use gradient checkpointing https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing +gradient_checkpointing: false +# additional kwargs to pass to the trainer for gradient checkpointing +# gradient_checkpointing_kwargs: +# use_reentrant: true + +# Stop training after this many evaluation losses have increased in a row +# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback +early_stopping_patience: 3 -# For one_cycle optim -lr_div_factor: # Learning rate div factor - -# Specify optimizer -# Valid values are driven by the Transformers OptimizerNames class, see: -# https://github.com/huggingface/transformers/blob/95b374952dc27d8511541d6f5a4e22c9ec11fb24/src/transformers/training_args.py#L134 -# -# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of -# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. 
When in doubt, it is recommended to start with the optimizer used -# in the examples/ for your model and fine-tuning use case. -# -# Valid values for 'optimizer' include: -# - adamw_hf -# - adamw_torch -# - adamw_torch_fused -# - adamw_torch_xla -# - adamw_apex_fused -# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1) -# - adafactor -# - adamw_anyprecision -# - sgd -# - adagrad -# - adamw_bnb_8bit -# - lion_8bit -# - lion_32bit -# - paged_adamw_32bit -# - paged_adamw_8bit -# - paged_lion_32bit -# - paged_lion_8bit -# - galore_adamw -# - galore_adamw_8bit -# - galore_adafactor -# - galore_adamw_layerwise -# - galore_adamw_8bit_layerwise -# - galore_adafactor_layerwise -optimizer: -# Dictionary of arguments to pass to the optimizer -optim_args: -# For Galore Optimizers the following optim_args are available -# rank: # type: int -# update_proj_gap # type: int -# scale # type: float -# proj_type: # type: str, default = std - -# The target modules to optimize, i.e. the module names that you would like to train, right now this is used only for GaLore algorithm -optim_target_modules: -# - self_attn # for llama -# - mlp - -# Specify weight decay -weight_decay: -# adamw hyperparams -adam_beta1: -adam_beta2: -adam_epsilon: -# Gradient clipping max norm -max_grad_norm: - -# Augmentation techniques -# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings -# currently only supported on Llama and Mistral -neftune_noise_alpha: - -# Whether to bettertransformers -flash_optimum: -# Whether to use xformers attention patch https://github.com/facebookresearch/xformers: -xformers_attention: -# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention: -flash_attention: -flash_attn_cross_entropy: # Whether to use flash-attention cross entropy implementation - advanced use only -flash_attn_rms_norm: # Whether to use flash-attention rms norm implementation - advanced use only -flash_attn_fuse_qkv: # Whether to fuse QKV into a single operation -flash_attn_fuse_mlp: # Whether to fuse part of the MLP into a single operation -# Whether to use scaled-dot-product attention -# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html -sdp_attention: -# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf -s2_attention: -# Optional[bool]. Whether to use low_cpu_mem_usage -low_cpu_mem_usage: -# Resume from a specific checkpoint dir -resume_from_checkpoint: -# If resume_from_checkpoint isn't set and you simply want it to start where it left off. -# Be careful with this being turned on between different models. -auto_resume_from_checkpoints: false - -# Don't mess with this, it's here for accelerate and torchrun -local_rank: - -# Add or change special tokens. -# If you add tokens here, you don't need to add them to the `tokens` list. -special_tokens: - # bos_token: "<s>" - # eos_token: "</s>" - # unk_token: "<unk>" - # pad_token: "[PAD]" - -# Add extra tokens. -tokens: - -# Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer. -# Only works for tokens that are not part of the base vocab (aka are added_tokens). -# Can be checked if they exist in tokenizer.json added_tokens. -added_tokens_overrides: # Dict[int, str] -# 128041: "<|im_start|>" -# 128042: "<|im_end|>" - -# FSDP -fsdp: -fsdp_config: - -# Deepspeed config path. 
e.g., deepspeed_configs/zero3.json -deepspeed: - -# Advanced DDP Arguments -ddp_timeout: -ddp_bucket_cap_mb: -ddp_broadcast_buffers: - -# Path to torch distx for optim 'adamw_anyprecision' -torchdistx_path: - -# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize -pretraining_dataset: +# Specify a scheduler and kwargs to use with the optimizer +lr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | empty for cosine +lr_scheduler_kwargs: +cosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr +cosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf) + +# For one_cycle optim +lr_div_factor: # Learning rate div factor + +# Specify optimizer +# Valid values are driven by the Transformers OptimizerNames class, see: +# https://github.com/huggingface/transformers/blob/95b374952dc27d8511541d6f5a4e22c9ec11fb24/src/transformers/training_args.py#L134 +# +# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of +# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. When in doubt, it is recommended to start with the optimizer used +# in the examples/ for your model and fine-tuning use case. +# +# Valid values for 'optimizer' include: +# - adamw_hf +# - adamw_torch +# - adamw_torch_fused +# - adamw_torch_xla +# - adamw_apex_fused +# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1) +# - adafactor +# - adamw_anyprecision +# - sgd +# - adagrad +# - adamw_bnb_8bit +# - lion_8bit +# - lion_32bit +# - paged_adamw_32bit +# - paged_adamw_8bit +# - paged_lion_32bit +# - paged_lion_8bit +# - galore_adamw +# - galore_adamw_8bit +# - galore_adafactor +# - galore_adamw_layerwise +# - galore_adamw_8bit_layerwise +# - galore_adafactor_layerwise +optimizer: +# Dictionary of arguments to pass to the optimizer +optim_args: +# For Galore Optimizers the following optim_args are available +# rank: # type: int +# update_proj_gap # type: int +# scale # type: float +# proj_type: # type: str, default = std + +# The target modules to optimize, i.e. 
the module names that you would like to train, right now this is used only for GaLore algorithm +optim_target_modules: +# - self_attn # for llama +# - mlp + +# Specify weight decay +weight_decay: +# adamw hyperparams +adam_beta1: +adam_beta2: +adam_epsilon: +# Gradient clipping max norm +max_grad_norm: + +# Augmentation techniques +# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings +# currently only supported on Llama and Mistral +neftune_noise_alpha: + +# Whether to bettertransformers +flash_optimum: +# Whether to use xformers attention patch https://github.com/facebookresearch/xformers: +xformers_attention: +# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention: +flash_attention: +flash_attn_cross_entropy: # Whether to use flash-attention cross entropy implementation - advanced use only +flash_attn_rms_norm: # Whether to use flash-attention rms norm implementation - advanced use only +flash_attn_fuse_qkv: # Whether to fuse QKV into a single operation +flash_attn_fuse_mlp: # Whether to fuse part of the MLP into a single operation +# Whether to use scaled-dot-product attention +# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html +sdp_attention: +# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf +s2_attention: +# Optional[bool]. Whether to use low_cpu_mem_usage +low_cpu_mem_usage: +# Resume from a specific checkpoint dir +resume_from_checkpoint: +# If resume_from_checkpoint isn't set and you simply want it to start where it left off. +# Be careful with this being turned on between different models. +auto_resume_from_checkpoints: false + +# Don't mess with this, it's here for accelerate and torchrun +local_rank: + +# Add or change special tokens. +# If you add tokens here, you don't need to add them to the `tokens` list. +special_tokens: + # bos_token: "<s>" + # eos_token: "</s>" + # unk_token: "<unk>" + # pad_token: "[PAD]" + +# Add extra tokens. +tokens: + +# Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer. +# Only works for tokens that are not part of the base vocab (aka are added_tokens). +# Can be checked if they exist in tokenizer.json added_tokens. +added_tokens_overrides: # Dict[int, str] +# 128041: "<|im_start|>" +# 128042: "<|im_end|>" + +# FSDP +fsdp: +fsdp_config: + +# Deepspeed config path. e.g., deepspeed_configs/zero3.json +deepspeed: + +# Advanced DDP Arguments +ddp_timeout: +ddp_bucket_cap_mb: +ddp_broadcast_buffers: -# Debug mode -debug: +# Path to torch distx for optim 'adamw_anyprecision' +torchdistx_path: -# Seed -seed: +# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize +pretraining_dataset: -# Allow overwrite yml config using from cli -strict: +# Debug mode +debug: + +# Seed +seed: + +# Allow overwrite yml config using from cli +strict: diff --git a/docs/custom_integrations.html b/docs/custom_integrations.html index 64e4d6bf1..5957e8c31 100644 --- a/docs/custom_integrations.html +++ b/docs/custom_integrations.html @@ -458,6 +458,7 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin
  • Usage
  • Citation
  • +
  • Adding a new integration
  • @@ -665,9 +666,70 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin url={https://arxiv.org/abs/2406.06623}, }

    Please see reference here

    - - + +
    +

    Adding a new integration

    +

    Plugins can be used to customize the behavior of the training pipeline through hooks. See axolotl.integrations.BasePlugin for the possible hooks.

    +

    To add a new integration, please follow these steps:

    +
      +
    1. Create a new folder in the src/axolotl/integrations directory.
    2. Add any relevant files (LICENSE, README.md, ACKNOWLEDGEMENTS.md, etc.) to the new folder.
    3. Add __init__.py and args.py files to the new folder (a minimal sketch of these two files follows below).
    4. (If applicable) Add CPU tests under tests/integrations or GPU tests under tests/e2e/integrations.
    +
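    For orientation, here is a rough sketch of what args.py and __init__.py might contain. All names below (my_integration, MyIntegrationArgs, MyIntegrationPlugin, and the config keys) are invented for illustration, and the hook names get_input_args and pre_model_load follow axolotl.integrations.base.BasePlugin as referenced above — check that class for the exact set of hooks available in your version.

    # src/axolotl/integrations/my_integration/args.py (hypothetical example)
    from typing import Optional

    from pydantic import BaseModel


    class MyIntegrationArgs(BaseModel):
        """Extra config keys this plugin adds to the axolotl YAML schema."""

        my_integration_enabled: Optional[bool] = None
        my_integration_scale: Optional[float] = 1.0

    # src/axolotl/integrations/my_integration/__init__.py (hypothetical example)
    from axolotl.integrations.base import BasePlugin


    class MyIntegrationPlugin(BasePlugin):
        """Minimal plugin: registers extra config args and hooks into model loading."""

        def get_input_args(self):
            # Dotted path to the pydantic model that defines this plugin's config keys.
            return "axolotl.integrations.my_integration.args.MyIntegrationArgs"

        def pre_model_load(self, cfg):
            # Called before the base model is loaded; apply patches or other setup here.
            if cfg.my_integration_enabled:
                print(f"my_integration active, scale={cfg.my_integration_scale}")

    With these two files in place, the plugin is enabled from the YAML config exactly as shown in the warning below, e.g. plugins: ["axolotl.integrations.my_integration.MyIntegrationPlugin"].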
    +
    +
    + +
    +
    +Tip +
    +
    +
    +

    See src/axolotl/integrations/cut_cross_entropy for a minimal integration example.

    +
    +
    +
    +
    +
    + +
    +
    +Warning +
    +
    +
    +

    If your integration could not be loaded, please make sure you have installed the package in editable mode:

    +
    pip install -e .
    +

    and that the integration name is spelled correctly in the config file:

    +
    plugins:
    +  - axolotl.integrations.your_integration_name.YourIntegrationPlugin
    +
    +
    +
    +
    +
    + +
    +
    +Note +
    +
    +
    +

    It is not necessary to place your integration in the integrations folder. It can live in any location, as long as it is importable from a package installed in your Python environment.

    +

    See this repo for an example: https://github.com/axolotl-ai-cloud/diff-transformer

    +
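    As a concrete (hypothetical) illustration of that, a plugin shipped in its own installed package — here called my_plugins_pkg, an invented name — is referenced by its importable path just like a built-in integration:

    plugins:
      - my_plugins_pkg.some_plugin.SomePlugin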
    +
    + +
    diff --git a/docs/installation.html b/docs/installation.html index f3597eb2a..631b898d3 100644 --- a/docs/installation.html +++ b/docs/installation.html @@ -536,6 +536,7 @@ Advanced Docker Configuration
  • Latitude.sh
  • JarvisLabs.ai
  • RunPod
  • +
  • Novita
  • diff --git a/search.json b/search.json index 6a3ef3f03..54d84d994 100644 --- a/search.json +++ b/search.json @@ -152,7 +152,7 @@ "href": "docs/config.html", "title": "Config Reference", "section": "", - "text": "# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files\n# This can also be a relative path to a model on disk\nbase_model: ./llama-7b-hf\n# You can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, etc)\nbase_model_ignore_patterns:\n# If the base_model repo on hf hub doesn't include configuration .json files,\n# You can set that here, or leave this empty to default to base_model\nbase_model_config: ./llama-7b-hf\n# You can specify to choose a specific model revision from huggingface hub\nrevision_of_model:\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one defined in the base model\ntokenizer_config:\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too\nmodel_type: AutoModelForCausalLM\n# Corresponding tokenizer for the model AutoTokenizer is a good choice\ntokenizer_type: AutoTokenizer\n# Trust remote code for untrusted source\ntrust_remote_code:\n# use_fast option for tokenizer loading from_pretrained, default to True\ntokenizer_use_fast:\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy:\n# Resize the model embeddings when new tokens are added to multiples of 32\n# This is reported to improve training speed on some models\nresize_token_embeddings_to_32x:\n# Optional[bool] Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.\nshrink_embeddings:\n\n# (Internal use only)\n# Used to identify which the model is based on\nis_falcon_derived_model:\nis_llama_derived_model:\nis_qwen_derived_model:\n# Please note that if you set this to true, `padding_side` will be set to \"left\" by default\nis_mistral_derived_model:\n\n# optional overrides to the base model configuration\noverrides_of_model_config:\n # RoPE Scaling https://github.com/huggingface/transformers/pull/24653\n rope_scaling:\n type: # linear | dynamic\n factor: # float\n\n# optional overrides the base model loading from_pretrained\noverrides_of_model_kwargs:\n # use_cache: False\n\n# optional overrides to the bnb 4bit quantization configuration\n# https://huggingface.co/docs/transformers/main/main_classes/quantization#transformers.BitsAndBytesConfig\nbnb_config_kwargs:\n # These are default values\n llm_int8_has_fp16_weight: false\n bnb_4bit_quant_type: nf4\n bnb_4bit_use_double_quant: true\n\n\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: true\n\n# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer\nload_in_8bit: true\n# Use bitsandbytes 4 bit\nload_in_4bit:\n\n# Use CUDA bf16\nbf16: true # bool or 'full' for `bf16_full_eval`. require >=ampere\n# Use CUDA fp16\nfp16: true\n# Use CUDA tf32\ntf32: true # require >=ampere\n\n# No AMP (automatic mixed precision)\nbfloat16: true # require >=ampere\nfloat16: true\n\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in gigabytes); default: unset\ngpu_memory_limit: 20GiB\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it takes up most or all of the available GPU VRAM, e.g. 
during a model and LoRA merge\nlora_on_cpu: true\n\n# A list of one or more datasets to finetune the model with\ndatasets:\n # HuggingFace dataset repo | s3://,gs:// path | \"json\" for local dataset, make sure to fill data_files\n - path: vicgalle/alpaca-gpt4\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn>\n ds_type: # Optional[str] (json|arrow|parquet|text|csv) defines the datatype when path is a file\n data_files: # Optional[str] path to source data files\n\n shards: # Optional[int] split dataset into N pieces (use with shards_idx)\n shards_idx: # Optional[int] = 0 the index of sharded dataset to use\n\n preprocess_shards: # Optional[int] process dataset in N sequential chunks for memory efficiency (exclusive with `shards`)\n\n name: # Optional[str] name of dataset configuration to load\n train_on_split: train # Optional[str] name of dataset split to load from\n revision: # Optional[str] The specific revision of the dataset to use when loading from the Hugging Face Hub. This can be a commit hash, tag, or branch name. If not specified, the latest version will be used. This parameter is ignored for local datasets.\n trust_remote_code: # Optional[bool] Trust remote code for untrusted source\n\n # Custom user instruction prompt\n - path: repo\n type:\n # The below are defaults. only set what's needed if you use a different column name.\n system_prompt: \"\"\n system_format: \"{system}\"\n field_system: system\n field_instruction: instruction\n field_input: input\n field_output: output\n\n # Customizable to be single line or multi-line\n # Use {instruction}/{input} as key to be replaced\n # 'format' can include {input}\n format: |-\n User: {instruction} {input}\n Assistant:\n # 'no_input_format' cannot include {input}\n no_input_format: \"{instruction} \"\n\n # For `completion` datsets only, uses the provided field instead of `text` column\n field:\n\n # Using chat template\n - path: ...\n # Set type to `chat_template` to use this strategy\n type: chat_template\n # Specify the name of the chat template to use\n # The name of the chat template to use for training, following values are supported:\n # - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default.\n # - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n # - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to if the tokenizer does not have a chat template else default to tokenizer. E.g. tokenizer_default_fallback_chatml.\n # - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n chat_template: tokenizer_default\n\n # Custom jinja chat template. 
Used only if `chat_template: jinja` or empty.\n chat_template_jinja:\n\n # Key containing the messages (default: \"messages\")\n field_messages: messages\n\n # Mapping of properties from the input dataset to the chat template.\n # (default: message_property_mappings={'role':'role', 'content':'content'})\n # If a property exists in the template but not in this mapping, the system will attempt\n # to load it directly from the message using the property name as the key.\n # Example: In the mapping below, 'from' is loaded from input dataset and used as 'role',\n # while 'value' is loaded and used as 'content' in the chat template.\n message_property_mappings:\n role: from\n content: value\n # ...\n\n # Optional[Dict[str, List]]. Roles mapping in the messages. The default is:\n roles:\n user: [\"human\", \"user\"]\n assistant: [\"gpt\", \"assistant\"]\n system: [\"system\"]\n tool: [\"tool\"]\n\n # Optional[bool]. Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If you wish to,\n # we recommend using a custom jinja template with the default system message removed or\n # adding a system turn with empty content.\n drop_system_message:\n\n # IMPORTANT: The following fields determine which parts of the conversation to train on.\n # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train\n # See examples at `docs/dataset-formats/conversation.qmd`\n # Note: If the below 4 fields are set to empty, defaults to training only on the last message.\n\n # Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: [\"assistant\"] # default\n # Optional[str]. Which EOS tokens to train on in the conversation. Possible values are:\n # - all: train on all EOS tokens\n # - turn (default): train on the EOS token at the end of each trainable turn\n # - last: train on the last EOS token in the conversation\n # TIP: Please make sure that your `tokenizer.eos_token` is same as EOS/EOT token in template. Otherwise, set `eos_token` under `special_tokens`.\n train_on_eos: last\n # The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`.\n message_field_training: training\n # The key in the message turn that contains the training details. Useful to selectively train on certain tokens in a turn.\n # The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train).\n message_field_training_detail: train_detail\n\n\n# If false, the datasets will not be shuffled and will keep their original order in `datasets`.\n# The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true.\nshuffle_merged_datasets: true\n\nDeduplicates datasets and test_datasets with identical entries.\ndataset_exact_deduplication: true\n\n# A list of one or more datasets to eval the model with.\n# You can use either test_datasets, or val_set_size, but not both.\ntest_datasets:\n - path: /workspace/data/eval.jsonl\n ds_type: json\n # You need to specify a split. 
For \"json\" datasets the default split is called \"train\".\n split: train\n type: completion\n data_files:\n - /workspace/data/eval.jsonl\n\n# use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo'\nrl:\nrl_beta: # Optional[float]. The beta parameter for the RL training.\n\n# dpo\ndpo_use_weighting: # Optional[bool]. Whether to perform weighting.\nrpo_alpha: # Optional[float]. Weighting of NLL term in loss from RPO paper.\n\n# orpo\norpo_alpha: 0.1 # Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to `beta` in `ORPOConfig` due to trl mapping.\n\n# kto\nkto_desirable_weight: # Optional[float]. Factor for desirable loss term in KTO loss.\nkto_undesirable_weight: # Optional[float]. Factor for undesirable loss term in KTO loss.\n\n# simpo\ncpo_alpha: 1.0 # Weight of the BC regularizer\nsimpo_gamma: 0.5 # Target reward margin for the SimPO loss\n\n# grpo\ntrl:\n use_vllm: # Optional[bool]. Whether to use VLLM for RL training.\n vllm_device: # Optional[str]. Device to use for VLLM.\n vllm_gpu_memory_utilization: # Optional[float]. GPU memory utilization for VLLM.\n vllm_max_model_len: # Optional[int]. Maximum length of the model for VLLM.\n vllm_dtype: # Optional[str]. Data type for VLLM.\n\n beta: # Optional[float]. Beta parameter for the RL training. Same as `rl_beta`. Use\n max_completion_length: # Optional[int]. Maximum length of the completion for RL training.\n\n reward_funcs: # Optional[list[str]]. List of reward functions to load. Paths must be importable from current dir.\n reward_weights: # Optional[list[float]]. List of reward weights for the reward functions.\n\n num_generations: # Optional[int]. Number of generations to sample.\n log_completions: # Optional[bool]. Whether to log completions.\n\n sync_ref_model: # Optional[bool]. Whether to sync the reference model.\n ref_model_mixup_alpha: # Optional[float]. Mixup alpha for the reference model.\n ref_model_sync_steps: # Optional[int]. Sync steps for the reference model.\n\n\n# reward modelling: `True` or `False`\nreward_model:\n\n# process reward modelling: `True` or `False`\nprocess_reward_model:\n\n# The name of the chat template to use for training, following values are supported:\n# - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default value.\n# - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n# - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not available in the tokenizer.\n# - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n# The selected chat template will be saved to the tokenizer_config.json for easier inferencing\n# Note: It is recommended to set train_on_inputs to true when using a chat template that is different from the model's default chat template.\nchat_template: tokenizer_default\n# custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null.\nchat_template_jinja: null\n# Changes the default system message. Currently only supports chatml.\ndefault_system_message: You are a helpful assistant. 
Please give a long and detailed answer.\n# Axolotl attempts to save the dataset as an arrow after packing the data together so\n# subsequent training attempts load faster, relative path\ndataset_prepared_path: data/last_run_prepared\n# Push prepared dataset to hub\npush_dataset_to_hub: # Optional[str] repo_org/repo_name\n# The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()`\n# if not set.\ndataset_processes: # defaults to os.cpu_count() if not set\n# Keep dataset in memory while preprocessing\n# Only needed if cached dataset is taking too much storage\ndataset_keep_in_memory:\n# push checkpoints to hub\nhub_model_id: # private repo path to push finetuned model\n# how to push checkpoints to hub\n# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy\nhub_strategy:\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets\n# Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: # boolean\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval.\nval_set_size: 0.04\n# Num shards for whole dataset\ndataset_shard_num:\n# Index of shard to use for whole dataset\ndataset_shard_idx:\n\n# The maximum length of an input to train with, this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: 2048\n# Pad inputs so each step uses constant sized buffers\n# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently\npad_to_sequence_len:\n# Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true'\nsample_packing:\n# Set to 'false' if getting errors during eval with sample_packing on.\neval_sample_packing:\n# You can set these packing optimizations AFTER starting a training at least once.\n# The trainer will provide recommended values for these values.\nsample_packing_eff_est:\ntotal_num_tokens:\n# Increasing the following values helps with packing, but usually only slightly (<%1.)\n# The number of samples packed at a time.\nsample_packing_group_size: 100000\n# The number of samples which can be packed into one sequence. Increase if using a large sequence_len with many short samples.\nsample_packing_bin_size: 200\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation:\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening:\n\n# Passed through to transformers when loading the model when launched without accelerate\n# Use `sequential` when training w/ model parallelism to limit memory\ndevice_map:\n# Defines the max memory usage per gpu on the system. 
Passed through to transformers when loading the model.\nmax_memory:\n\n# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model\nadapter: lora\n# If you already have a lora model trained that you want to load, put that here.\n# This means after training, if you want to test the model, you should set this to the value of `output_dir`.\n# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir:\n\n# LoRA hyperparameters\n# For more details about the following options, see:\n# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2\nlora_r: 8\nlora_alpha: 16\nlora_dropout: 0.05\nlora_target_modules:\n - q_proj\n - v_proj\n# - k_proj\n# - o_proj\n# - gate_proj\n# - down_proj\n# - up_proj\nlora_target_linear: # If true, will target all linear modules\npeft_layers_to_transform: # The layer indices to transform, otherwise, apply to all layers\n\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens.\n# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models.\n# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\n# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994\nlora_modules_to_save:\n# - embed_tokens\n# - lm_head\n\nlora_fan_in_fan_out: false\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for\n# speed and memory savings\n# See: https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html\nlora_mlp_kernel: true\nlora_qkv_kernel: true\nlora_o_kernel: true\n\n# LoRA+ hyperparameters\n# For more details about the following options, see:\n# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/train_builder.py`\nloraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.\nloraplus_lr_embedding: # loraplus learning rate for lora embedding layers. 
Default value is 1e-6.\n\npeft:\n # Configuration options for loftq initialization for LoRA\n # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization\n loftq_config:\n loftq_bits: # typically 4 bits\n\n# ReLoRA configuration\n# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed\nrelora_steps: # Number of steps per ReLoRA restart\nrelora_warmup_steps: # Number of per-restart warmup steps\nrelora_anneal_steps: # Number of anneal steps for each relora cycle\nrelora_prune_ratio: # threshold for optimizer magnitude when pruning\nrelora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings\n\n# wandb configuration if you're using it\n# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`.\nwandb_mode: # \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn off wandb\nwandb_project: # Your wandb project name\nwandb_entity: # A wandb Team name if using a Team\nwandb_watch:\nwandb_name: # Set the name of your wandb run\nwandb_run_id: # Set the ID of your wandb run\nwandb_log_model: # \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only at the end of training\n\n# mlflow configuration if you're using it\nmlflow_tracking_uri: # URI to mlflow\nmlflow_experiment_name: # Your experiment name\nmlflow_run_name: # Your run name\nhf_mlflow_log_artifacts: # set to true to copy each saved checkpoint on each save to mlflow artifact registry\n\n# Comet configuration if you're using it\n# Make sure your `COMET_API_KEY` environment variable is set (recommended) or you login to Comet with `comet login`.\n# Check out our documentation for more details https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start\nuse_comet: # Enable or disable Comet integration.\ncomet_api_key: # API key for Comet. Recommended to set via `comet login`.\ncomet_workspace: # Workspace name in Comet. Defaults to the user's default workspace.\ncomet_project_name: # Project name in Comet. Defaults to Uncategorized.\ncomet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key.\ncomet_mode: # Create a new experiment (\"create\") or log to an existing one (\"get\"). Default (\"get_or_create\") auto-selects based on configuration.\ncomet_online: # Set to True to log data to Comet server, or False for offline storage. Default is True.\ncomet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details.\n\n# Tensorboard\nuse_tensorboard: # Optional[bool]\n\n# Where to save the full-finetuned model to\noutput_dir: ./completed-model\n\n# Whether to use torch.compile and which backend to use\n# setting to `auto` will enable torch compile when torch>=2.5.1\ntorch_compile: # Optional[Union[Literal[\"auto\"], bool]]\ntorch_compile_backend: # Optional[str]\n\n# Training hyperparameters\n\n# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps.\ngradient_accumulation_steps: 1\n# The number of samples to include in each batch. 
This is the number of samples sent to each GPU.\n# Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: 2\neval_batch_size:\nnum_epochs: 4\nwarmup_steps: 100 # cannot use with warmup_ratio\nwarmup_ratio: 0.05 # cannot use with warmup_steps\nlearning_rate: 0.00003\nlr_quadratic_warmup:\nlogging_steps:\neval_steps: # Leave empty to eval at each epoch, integer for every N steps. float for fraction of total steps\nevals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps\neval_strategy: # Set to `\"no\"` to skip evaluation, `\"epoch\"` at end of each epoch, leave empty to infer from `eval_steps`.\nsave_strategy: # Set to `\"no\"` to skip checkpoint saves, `\"epoch\"` at end of each epoch, `\"best\"` when better result is achieved, leave empty to infer from `save_steps`.\nsave_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps\nsaves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsave_total_limit: # Checkpoints saved at a time\n# Maximum number of iterations to train for. It precedes num_epochs which means that\n# if both are set, num_epochs will not be guaranteed.\n# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps:\n\n# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time.\ninclude_tokens_per_second: # Optional[bool]\n\n# whether to find batch size that fits in memory. Passed to underlying transformers Trainer\nauto_find_batch_size: # Optional[bool]\n\neval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0\neval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128\neval_causal_lm_metrics: # HF evaluate metrics used during evaluation. 
Default is [\"sacrebleu\", \"comet\", \"ter\", \"chrf\", \"perplexity\"]\n\nprofiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir.\n # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information\n # snapshots can be visualized @ https://pytorch.org/memory_viz\n\nloss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training)\nloss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3)\n\n# Save model as safetensors (require safetensors package)\nsave_safetensors:\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: false\n# Group similarly sized data to minimize padding.\n# May be slower to start, as it must download and sort the entire dataset.\n# Note that training loss may have an oscillating pattern with this enabled.\ngroup_by_length: false\n\n# Whether to use gradient checkpointing https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: false\n# additional kwargs to pass to the trainer for gradient checkpointing\n# gradient_checkpointing_kwargs:\n# use_reentrant: true\n\n# Stop training after this many evaluation losses have increased in a row\n# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback\nearly_stopping_patience: 3\n\n# Specify a scheduler and kwargs to use with the optimizer\nlr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | empty for cosine\nlr_scheduler_kwargs:\ncosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr\ncosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)\n\n# For one_cycle optim\nlr_div_factor: # Learning rate div factor\n\n# Specify optimizer\n# Valid values are driven by the Transformers OptimizerNames class, see:\n# https://github.com/huggingface/transformers/blob/95b374952dc27d8511541d6f5a4e22c9ec11fb24/src/transformers/training_args.py#L134\n#\n# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of\n# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. When in doubt, it is recommended to start with the optimizer used\n# in the examples/ for your model and fine-tuning use case.\n#\n# Valid values for 'optimizer' include:\n# - adamw_hf\n# - adamw_torch\n# - adamw_torch_fused\n# - adamw_torch_xla\n# - adamw_apex_fused\n# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1)\n# - adafactor\n# - adamw_anyprecision\n# - sgd\n# - adagrad\n# - adamw_bnb_8bit\n# - lion_8bit\n# - lion_32bit\n# - paged_adamw_32bit\n# - paged_adamw_8bit\n# - paged_lion_32bit\n# - paged_lion_8bit\n# - galore_adamw\n# - galore_adamw_8bit\n# - galore_adafactor\n# - galore_adamw_layerwise\n# - galore_adamw_8bit_layerwise\n# - galore_adafactor_layerwise\noptimizer:\n# Dictionary of arguments to pass to the optimizer\noptim_args:\n# For Galore Optimizers the following optim_args are available\n# rank: # type: int\n# update_proj_gap # type: int\n# scale # type: float\n# proj_type: # type: str, default = std\n\n# The target modules to optimize, i.e. 
the module names that you would like to train, right now this is used only for GaLore algorithm\noptim_target_modules:\n# - self_attn # for llama\n# - mlp\n\n# Specify weight decay\nweight_decay:\n# adamw hyperparams\nadam_beta1:\nadam_beta2:\nadam_epsilon:\n# Gradient clipping max norm\nmax_grad_norm:\n\n# Augmentation techniques\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings\n# currently only supported on Llama and Mistral\nneftune_noise_alpha:\n\n# Whether to bettertransformers\nflash_optimum:\n# Whether to use xformers attention patch https://github.com/facebookresearch/xformers:\nxformers_attention:\n# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:\nflash_attention:\nflash_attn_cross_entropy: # Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_rms_norm: # Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_fuse_qkv: # Whether to fuse QKV into a single operation\nflash_attn_fuse_mlp: # Whether to fuse part of the MLP into a single operation\n# Whether to use scaled-dot-product attention\n# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention:\n# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention:\n# Optional[bool]. Whether to use low_cpu_mem_usage\nlow_cpu_mem_usage:\n# Resume from a specific checkpoint dir\nresume_from_checkpoint:\n# If resume_from_checkpoint isn't set and you simply want it to start where it left off.\n# Be careful with this being turned on between different models.\nauto_resume_from_checkpoints: false\n\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank:\n\n# Add or change special tokens.\n# If you add tokens here, you don't need to add them to the `tokens` list.\nspecial_tokens:\n # bos_token: \"<s>\"\n # eos_token: \"</s>\"\n # unk_token: \"<unk>\"\n # pad_token: \"[PAD]\"\n\n# Add extra tokens.\ntokens:\n\n# Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer.\n# Only works for tokens that are not part of the base vocab (aka are added_tokens).\n# Can be checked if they exist in tokenizer.json added_tokens.\nadded_tokens_overrides: # Dict[int, str]\n# 128041: \"<|im_start|>\"\n# 128042: \"<|im_end|>\"\n\n# FSDP\nfsdp:\nfsdp_config:\n\n# Deepspeed config path. 
e.g., deepspeed_configs/zero3.json\ndeepspeed:\n\n# Advanced DDP Arguments\nddp_timeout:\nddp_bucket_cap_mb:\nddp_broadcast_buffers:\n\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path:\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize\npretraining_dataset:\n\n# Debug mode\ndebug:\n\n# Seed\nseed:\n\n# Allow overwrite yml config using from cli\nstrict:", + "text": "# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files\n# This can also be a relative path to a model on disk\nbase_model: ./llama-7b-hf\n# You can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, etc)\nbase_model_ignore_patterns:\n# If the base_model repo on hf hub doesn't include configuration .json files,\n# You can set that here, or leave this empty to default to base_model\nbase_model_config: ./llama-7b-hf\n# You can specify to choose a specific model revision from huggingface hub\nrevision_of_model:\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one defined in the base model\ntokenizer_config:\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too\nmodel_type: AutoModelForCausalLM\n# Corresponding tokenizer for the model AutoTokenizer is a good choice\ntokenizer_type: AutoTokenizer\n# Trust remote code for untrusted source\ntrust_remote_code:\n# use_fast option for tokenizer loading from_pretrained, default to True\ntokenizer_use_fast:\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy:\n# Resize the model embeddings when new tokens are added to multiples of 32\n# This is reported to improve training speed on some models\nresize_token_embeddings_to_32x:\n# Optional[bool] Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.\nshrink_embeddings:\n\n# (Internal use only)\n# Used to identify which the model is based on\nis_falcon_derived_model:\nis_llama_derived_model:\nis_qwen_derived_model:\n# Please note that if you set this to true, `padding_side` will be set to \"left\" by default\nis_mistral_derived_model:\n\n# optional overrides to the base model configuration\noverrides_of_model_config:\n # RoPE Scaling https://github.com/huggingface/transformers/pull/24653\n rope_scaling:\n type: # linear | dynamic\n factor: # float\n\n# optional overrides the base model loading from_pretrained\noverrides_of_model_kwargs:\n # use_cache: False\n\n# optional overrides to the bnb 4bit quantization configuration\n# https://huggingface.co/docs/transformers/main/main_classes/quantization#transformers.BitsAndBytesConfig\nbnb_config_kwargs:\n # These are default values\n llm_int8_has_fp16_weight: false\n bnb_4bit_quant_type: nf4\n bnb_4bit_use_double_quant: true\n\n\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: true\n\n# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer\nload_in_8bit: true\n# Use bitsandbytes 4 bit\nload_in_4bit:\n\n# Use CUDA bf16\nbf16: true # bool or 'full' for `bf16_full_eval`. 
require >=ampere\n# Use CUDA fp16\nfp16: true\n# Use CUDA tf32\ntf32: true # require >=ampere\n\n# No AMP (automatic mixed precision)\nbfloat16: true # require >=ampere\nfloat16: true\n\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in gigabytes); default: unset\ngpu_memory_limit: 20GiB\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge\nlora_on_cpu: true\n\n# List[str]. Add plugins to extend the pipeline.\n# See `src/axolotl/integrations` for the available plugins or doc below for more details.\n# https://axolotl-ai-cloud.github.io/axolotl/docs/custom_integrations.html\nplugins:\n # - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin\n\n# A list of one or more datasets to finetune the model with\ndatasets:\n # HuggingFace dataset repo | s3://,gs:// path | \"json\" for local dataset, make sure to fill data_files\n - path: vicgalle/alpaca-gpt4\n # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]\n type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn>\n ds_type: # Optional[str] (json|arrow|parquet|text|csv) defines the datatype when path is a file\n data_files: # Optional[str] path to source data files\n\n shards: # Optional[int] split dataset into N pieces (use with shards_idx)\n shards_idx: # Optional[int] = 0 the index of sharded dataset to use\n\n preprocess_shards: # Optional[int] process dataset in N sequential chunks for memory efficiency (exclusive with `shards`)\n\n name: # Optional[str] name of dataset configuration to load\n train_on_split: train # Optional[str] name of dataset split to load from\n revision: # Optional[str] The specific revision of the dataset to use when loading from the Hugging Face Hub. This can be a commit hash, tag, or branch name. If not specified, the latest version will be used. This parameter is ignored for local datasets.\n trust_remote_code: # Optional[bool] Trust remote code for untrusted source\n\n # Custom user instruction prompt\n - path: repo\n type:\n # The below are defaults. only set what's needed if you use a different column name.\n system_prompt: \"\"\n system_format: \"{system}\"\n field_system: system\n field_instruction: instruction\n field_input: input\n field_output: output\n\n # Customizable to be single line or multi-line\n # Use {instruction}/{input} as key to be replaced\n # 'format' can include {input}\n format: |-\n User: {instruction} {input}\n Assistant:\n # 'no_input_format' cannot include {input}\n no_input_format: \"{instruction} \"\n\n # For `completion` datsets only, uses the provided field instead of `text` column\n field:\n\n # Using chat template\n - path: ...\n # Set type to `chat_template` to use this strategy\n type: chat_template\n # Specify the name of the chat template to use\n # The name of the chat template to use for training, following values are supported:\n # - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. 
This is the default.\n # - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n # - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to if the tokenizer does not have a chat template else default to tokenizer. E.g. tokenizer_default_fallback_chatml.\n # - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n chat_template: tokenizer_default\n\n # Custom jinja chat template. Used only if `chat_template: jinja` or empty.\n chat_template_jinja:\n\n # Key containing the messages (default: \"messages\")\n field_messages: messages\n\n # Mapping of properties from the input dataset to the chat template.\n # (default: message_property_mappings={'role':'role', 'content':'content'})\n # If a property exists in the template but not in this mapping, the system will attempt\n # to load it directly from the message using the property name as the key.\n # Example: In the mapping below, 'from' is loaded from input dataset and used as 'role',\n # while 'value' is loaded and used as 'content' in the chat template.\n message_property_mappings:\n role: from\n content: value\n # ...\n\n # Optional[Dict[str, List]]. Roles mapping in the messages. The default is:\n roles:\n user: [\"human\", \"user\"]\n assistant: [\"gpt\", \"assistant\"]\n system: [\"system\"]\n tool: [\"tool\"]\n\n # Optional[bool]. Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If you wish to,\n # we recommend using a custom jinja template with the default system message removed or\n # adding a system turn with empty content.\n drop_system_message:\n\n # IMPORTANT: The following fields determine which parts of the conversation to train on.\n # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train\n # See examples at `docs/dataset-formats/conversation.qmd`\n # Note: If the below 4 fields are set to empty, defaults to training only on the last message.\n\n # Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: [\"assistant\"] # default\n # Optional[str]. Which EOS tokens to train on in the conversation. Possible values are:\n # - all: train on all EOS tokens\n # - turn (default): train on the EOS token at the end of each trainable turn\n # - last: train on the last EOS token in the conversation\n # TIP: Please make sure that your `tokenizer.eos_token` is same as EOS/EOT token in template. Otherwise, set `eos_token` under `special_tokens`.\n train_on_eos: last\n # The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`.\n message_field_training: training\n # The key in the message turn that contains the training details. 
Useful to selectively train on certain tokens in a turn.\n # The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train).\n message_field_training_detail: train_detail\n\n\n# If false, the datasets will not be shuffled and will keep their original order in `datasets`.\n# The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true.\nshuffle_merged_datasets: true\n\nDeduplicates datasets and test_datasets with identical entries.\ndataset_exact_deduplication: true\n\n# A list of one or more datasets to eval the model with.\n# You can use either test_datasets, or val_set_size, but not both.\ntest_datasets:\n - path: /workspace/data/eval.jsonl\n ds_type: json\n # You need to specify a split. For \"json\" datasets the default split is called \"train\".\n split: train\n type: completion\n data_files:\n - /workspace/data/eval.jsonl\n\n# use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo'\nrl:\nrl_beta: # Optional[float]. The beta parameter for the RL training.\n\n# dpo\ndpo_use_weighting: # Optional[bool]. Whether to perform weighting.\nrpo_alpha: # Optional[float]. Weighting of NLL term in loss from RPO paper.\n\n# orpo\norpo_alpha: 0.1 # Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to `beta` in `ORPOConfig` due to trl mapping.\n\n# kto\nkto_desirable_weight: # Optional[float]. Factor for desirable loss term in KTO loss.\nkto_undesirable_weight: # Optional[float]. Factor for undesirable loss term in KTO loss.\n\n# simpo\ncpo_alpha: 1.0 # Weight of the BC regularizer\nsimpo_gamma: 0.5 # Target reward margin for the SimPO loss\n\n# grpo\ntrl:\n use_vllm: # Optional[bool]. Whether to use VLLM for RL training.\n vllm_device: # Optional[str]. Device to use for VLLM.\n vllm_gpu_memory_utilization: # Optional[float]. GPU memory utilization for VLLM.\n vllm_max_model_len: # Optional[int]. Maximum length of the model for VLLM.\n vllm_dtype: # Optional[str]. Data type for VLLM.\n\n beta: # Optional[float]. Beta parameter for the RL training. Same as `rl_beta`. Use\n max_completion_length: # Optional[int]. Maximum length of the completion for RL training.\n\n reward_funcs: # Optional[list[str]]. List of reward functions to load. Paths must be importable from current dir.\n reward_weights: # Optional[list[float]]. List of reward weights for the reward functions.\n\n num_generations: # Optional[int]. Number of generations to sample.\n log_completions: # Optional[bool]. Whether to log completions.\n\n sync_ref_model: # Optional[bool]. Whether to sync the reference model.\n ref_model_mixup_alpha: # Optional[float]. Mixup alpha for the reference model.\n ref_model_sync_steps: # Optional[int]. Sync steps for the reference model.\n\n\n# reward modelling: `True` or `False`\nreward_model:\n\n# process reward modelling: `True` or `False`\nprocess_reward_model:\n\n# The name of the chat template to use for training, following values are supported:\n# - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default value.\n# - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n# - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. 
tokenizer_default_fallback_chatml. This is useful when the chat template is not available in the tokenizer.\n# - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n# The selected chat template will be saved to the tokenizer_config.json for easier inferencing\n# Note: It is recommended to set train_on_inputs to true when using a chat template that is different from the model's default chat template.\nchat_template: tokenizer_default\n# custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null.\nchat_template_jinja: null\n# Changes the default system message. Currently only supports chatml.\ndefault_system_message: You are a helpful assistant. Please give a long and detailed answer.\n# Axolotl attempts to save the dataset as an arrow after packing the data together so\n# subsequent training attempts load faster, relative path\ndataset_prepared_path: data/last_run_prepared\n# Push prepared dataset to hub\npush_dataset_to_hub: # Optional[str] repo_org/repo_name\n# The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()`\n# if not set.\ndataset_processes: # defaults to os.cpu_count() if not set\n# Keep dataset in memory while preprocessing\n# Only needed if cached dataset is taking too much storage\ndataset_keep_in_memory:\n# push checkpoints to hub\nhub_model_id: # private repo path to push finetuned model\n# how to push checkpoints to hub\n# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy\nhub_strategy:\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets\n# Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: # boolean\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval.\nval_set_size: 0.04\n# Num shards for whole dataset\ndataset_shard_num:\n# Index of shard to use for whole dataset\ndataset_shard_idx:\n\n# The maximum length of an input to train with, this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: 2048\n# Pad inputs so each step uses constant sized buffers\n# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently\npad_to_sequence_len:\n# Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true'\nsample_packing:\n# Set to 'false' if getting errors during eval with sample_packing on.\neval_sample_packing:\n# You can set these packing optimizations AFTER starting a training at least once.\n# The trainer will provide recommended values for these values.\nsample_packing_eff_est:\ntotal_num_tokens:\n# Increasing the following values helps with packing, but usually only slightly (<%1.)\n# The number of samples packed at a time.\nsample_packing_group_size: 100000\n# The number of samples which can be packed into one sequence. 
Increase if using a large sequence_len with many short samples.\nsample_packing_bin_size: 200\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation:\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening:\n\n# Passed through to transformers when loading the model when launched without accelerate\n# Use `sequential` when training w/ model parallelism to limit memory\ndevice_map:\n# Defines the max memory usage per gpu on the system. Passed through to transformers when loading the model.\nmax_memory:\n\n# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model\nadapter: lora\n# If you already have a lora model trained that you want to load, put that here.\n# This means after training, if you want to test the model, you should set this to the value of `output_dir`.\n# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir:\n\n# LoRA hyperparameters\n# For more details about the following options, see:\n# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2\nlora_r: 8\nlora_alpha: 16\nlora_dropout: 0.05\nlora_target_modules:\n - q_proj\n - v_proj\n# - k_proj\n# - o_proj\n# - gate_proj\n# - down_proj\n# - up_proj\nlora_target_linear: # If true, will target all linear modules\npeft_layers_to_transform: # The layer indices to transform, otherwise, apply to all layers\n\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens.\n# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models.\n# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\n# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994\nlora_modules_to_save:\n# - embed_tokens\n# - lm_head\n\nlora_fan_in_fan_out: false\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for\n# speed and memory savings\n# See: https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html\nlora_mlp_kernel: true\nlora_qkv_kernel: true\nlora_o_kernel: true\n\n# LoRA+ hyperparameters\n# For more details about the following options, see:\n# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/train_builder.py`\nloraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.\nloraplus_lr_embedding: # loraplus learning rate for lora embedding layers. 
Default value is 1e-6.\n\npeft:\n # Configuration options for loftq initialization for LoRA\n # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization\n loftq_config:\n loftq_bits: # typically 4 bits\n\n# ReLoRA configuration\n# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed\nrelora_steps: # Number of steps per ReLoRA restart\nrelora_warmup_steps: # Number of per-restart warmup steps\nrelora_anneal_steps: # Number of anneal steps for each relora cycle\nrelora_prune_ratio: # threshold for optimizer magnitude when pruning\nrelora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings\n\n# wandb configuration if you're using it\n# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`.\nwandb_mode: # \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn off wandb\nwandb_project: # Your wandb project name\nwandb_entity: # A wandb Team name if using a Team\nwandb_watch:\nwandb_name: # Set the name of your wandb run\nwandb_run_id: # Set the ID of your wandb run\nwandb_log_model: # \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only at the end of training\n\n# mlflow configuration if you're using it\nmlflow_tracking_uri: # URI to mlflow\nmlflow_experiment_name: # Your experiment name\nmlflow_run_name: # Your run name\nhf_mlflow_log_artifacts: # set to true to copy each saved checkpoint on each save to mlflow artifact registry\n\n# Comet configuration if you're using it\n# Make sure your `COMET_API_KEY` environment variable is set (recommended) or you login to Comet with `comet login`.\n# Check out our documentation for more details https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start\nuse_comet: # Enable or disable Comet integration.\ncomet_api_key: # API key for Comet. Recommended to set via `comet login`.\ncomet_workspace: # Workspace name in Comet. Defaults to the user's default workspace.\ncomet_project_name: # Project name in Comet. Defaults to Uncategorized.\ncomet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key.\ncomet_mode: # Create a new experiment (\"create\") or log to an existing one (\"get\"). Default (\"get_or_create\") auto-selects based on configuration.\ncomet_online: # Set to True to log data to Comet server, or False for offline storage. Default is True.\ncomet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details.\n\n# Tensorboard\nuse_tensorboard: # Optional[bool]\n\n# Where to save the full-finetuned model to\noutput_dir: ./completed-model\n\n# Whether to use torch.compile and which backend to use\n# setting to `auto` will enable torch compile when torch>=2.5.1\ntorch_compile: # Optional[Union[Literal[\"auto\"], bool]]\ntorch_compile_backend: # Optional[str]\n\n# Training hyperparameters\n\n# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps.\ngradient_accumulation_steps: 1\n# The number of samples to include in each batch. 
This is the number of samples sent to each GPU.\n# Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: 2\neval_batch_size:\nnum_epochs: 4\nwarmup_steps: 100 # cannot use with warmup_ratio\nwarmup_ratio: 0.05 # cannot use with warmup_steps\nlearning_rate: 0.00003\nlr_quadratic_warmup:\nlogging_steps:\neval_steps: # Leave empty to eval at each epoch, integer for every N steps. float for fraction of total steps\nevals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps\neval_strategy: # Set to `\"no\"` to skip evaluation, `\"epoch\"` at end of each epoch, leave empty to infer from `eval_steps`.\nsave_strategy: # Set to `\"no\"` to skip checkpoint saves, `\"epoch\"` at end of each epoch, `\"best\"` when better result is achieved, leave empty to infer from `save_steps`.\nsave_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps\nsaves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsave_total_limit: # Checkpoints saved at a time\n# Maximum number of iterations to train for. It precedes num_epochs which means that\n# if both are set, num_epochs will not be guaranteed.\n# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps:\n\n# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time.\ninclude_tokens_per_second: # Optional[bool]\n\n# whether to find batch size that fits in memory. Passed to underlying transformers Trainer\nauto_find_batch_size: # Optional[bool]\n\neval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0\neval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128\neval_causal_lm_metrics: # HF evaluate metrics used during evaluation. 
Default is [\"sacrebleu\", \"comet\", \"ter\", \"chrf\", \"perplexity\"]\n\nprofiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir.\n # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information\n # snapshots can be visualized @ https://pytorch.org/memory_viz\n\nloss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training)\nloss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3)\n\n# Save model as safetensors (require safetensors package)\nsave_safetensors:\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: false\n# Group similarly sized data to minimize padding.\n# May be slower to start, as it must download and sort the entire dataset.\n# Note that training loss may have an oscillating pattern with this enabled.\ngroup_by_length: false\n\n# Whether to use gradient checkpointing https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: false\n# additional kwargs to pass to the trainer for gradient checkpointing\n# gradient_checkpointing_kwargs:\n# use_reentrant: true\n\n# Stop training after this many evaluation losses have increased in a row\n# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback\nearly_stopping_patience: 3\n\n# Specify a scheduler and kwargs to use with the optimizer\nlr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | empty for cosine\nlr_scheduler_kwargs:\ncosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr\ncosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)\n\n# For one_cycle optim\nlr_div_factor: # Learning rate div factor\n\n# Specify optimizer\n# Valid values are driven by the Transformers OptimizerNames class, see:\n# https://github.com/huggingface/transformers/blob/95b374952dc27d8511541d6f5a4e22c9ec11fb24/src/transformers/training_args.py#L134\n#\n# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of\n# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. When in doubt, it is recommended to start with the optimizer used\n# in the examples/ for your model and fine-tuning use case.\n#\n# Valid values for 'optimizer' include:\n# - adamw_hf\n# - adamw_torch\n# - adamw_torch_fused\n# - adamw_torch_xla\n# - adamw_apex_fused\n# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1)\n# - adafactor\n# - adamw_anyprecision\n# - sgd\n# - adagrad\n# - adamw_bnb_8bit\n# - lion_8bit\n# - lion_32bit\n# - paged_adamw_32bit\n# - paged_adamw_8bit\n# - paged_lion_32bit\n# - paged_lion_8bit\n# - galore_adamw\n# - galore_adamw_8bit\n# - galore_adafactor\n# - galore_adamw_layerwise\n# - galore_adamw_8bit_layerwise\n# - galore_adafactor_layerwise\noptimizer:\n# Dictionary of arguments to pass to the optimizer\noptim_args:\n# For Galore Optimizers the following optim_args are available\n# rank: # type: int\n# update_proj_gap # type: int\n# scale # type: float\n# proj_type: # type: str, default = std\n\n# The target modules to optimize, i.e. 
the module names that you would like to train, right now this is used only for GaLore algorithm\noptim_target_modules:\n# - self_attn # for llama\n# - mlp\n\n# Specify weight decay\nweight_decay:\n# adamw hyperparams\nadam_beta1:\nadam_beta2:\nadam_epsilon:\n# Gradient clipping max norm\nmax_grad_norm:\n\n# Augmentation techniques\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings\n# currently only supported on Llama and Mistral\nneftune_noise_alpha:\n\n# Whether to bettertransformers\nflash_optimum:\n# Whether to use xformers attention patch https://github.com/facebookresearch/xformers:\nxformers_attention:\n# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:\nflash_attention:\nflash_attn_cross_entropy: # Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_rms_norm: # Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_fuse_qkv: # Whether to fuse QKV into a single operation\nflash_attn_fuse_mlp: # Whether to fuse part of the MLP into a single operation\n# Whether to use scaled-dot-product attention\n# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention:\n# Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention:\n# Optional[bool]. Whether to use low_cpu_mem_usage\nlow_cpu_mem_usage:\n# Resume from a specific checkpoint dir\nresume_from_checkpoint:\n# If resume_from_checkpoint isn't set and you simply want it to start where it left off.\n# Be careful with this being turned on between different models.\nauto_resume_from_checkpoints: false\n\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank:\n\n# Add or change special tokens.\n# If you add tokens here, you don't need to add them to the `tokens` list.\nspecial_tokens:\n # bos_token: \"<s>\"\n # eos_token: \"</s>\"\n # unk_token: \"<unk>\"\n # pad_token: \"[PAD]\"\n\n# Add extra tokens.\ntokens:\n\n# Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer.\n# Only works for tokens that are not part of the base vocab (aka are added_tokens).\n# Can be checked if they exist in tokenizer.json added_tokens.\nadded_tokens_overrides: # Dict[int, str]\n# 128041: \"<|im_start|>\"\n# 128042: \"<|im_end|>\"\n\n# FSDP\nfsdp:\nfsdp_config:\n\n# Deepspeed config path. 
e.g., deepspeed_configs/zero3.json\ndeepspeed:\n\n# Advanced DDP Arguments\nddp_timeout:\nddp_bucket_cap_mb:\nddp_broadcast_buffers:\n\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path:\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize\npretraining_dataset:\n\n# Debug mode\ndebug:\n\n# Seed\nseed:\n\n# Allow overwrite yml config using from cli\nstrict:", "crumbs": [ "Getting Started", "Config Reference" @@ -262,7 +262,7 @@ "href": "docs/installation.html#sec-cloud", "title": "Installation", "section": "3 Cloud Environments", - "text": "3 Cloud Environments\n\n3.1 Cloud GPU Providers\nFor providers supporting Docker:\n\nUse axolotlai/axolotl-cloud:main-latest\nAvailable on:\n\nLatitude.sh\nJarvisLabs.ai\nRunPod\n\n\n\n\n3.2 Google Colab\nUse our example notebook.", + "text": "3 Cloud Environments\n\n3.1 Cloud GPU Providers\nFor providers supporting Docker:\n\nUse axolotlai/axolotl-cloud:main-latest\nAvailable on:\n\nLatitude.sh\nJarvisLabs.ai\nRunPod\nNovita\n\n\n\n\n3.2 Google Colab\nUse our example notebook.", "crumbs": [ "Getting Started", "Installation" @@ -1115,6 +1115,17 @@ "Custom Integrations" ] }, + { + "objectID": "docs/custom_integrations.html#adding-a-new-integration", + "href": "docs/custom_integrations.html#adding-a-new-integration", + "title": "Custom Integrations", + "section": "Adding a new integration", + "text": "Adding a new integration\nPlugins can be used to customize the behavior of the training pipeline through hooks. See axolotl.integrations.BasePlugin for the possible hooks.\nTo add a new integration, please follow these steps:\n\nCreate a new folder in the src/axolotl/integrations directory.\nAdd any relevant files (LICENSE, README.md, ACKNOWLEDGEMENTS.md, etc.) to the new folder.\nAdd __init__.py and args.py files to the new folder.\n\n\n__init__.py should import the integration and hook into the appropriate functions.\nargs.py should define the arguments for the integration.\n\n\n(If applicable) Add CPU tests under tests/integrations or GPU tests under tests/e2e/integrations.\n\n\n\n\n\n\n\nTip\n\n\n\nSee src/axolotl/integrations/cut_cross_entropy for a minimal integration example.\n\n\n\n\n\n\n\n\nWarning\n\n\n\nIf you could not load your integration, please ensure you are pip installing in editable mode.\npip install -e .\nand correctly spelled the integration name in the config file.\nplugins:\n - axolotl.integrations.your_integration_name.YourIntegrationPlugin\n\n\n\n\n\n\n\n\nNote\n\n\n\nIt is not necessary to place your integration in the integrations folder. 
It can be in any location, so long as it’s installed in a package in your python env.\nSee this repo for an example: https://github.com/axolotl-ai-cloud/diff-transformer", + "crumbs": [ + "Advanced Features", + "Custom Integrations" + ] + }, { "objectID": "docs/mac.html", "href": "docs/mac.html", diff --git a/sitemap.xml b/sitemap.xml index f5b7b5faa..4038ba135 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -2,162 +2,162 @@ https://axolotl-ai-cloud.github.io/axolotl/examples/colab-notebooks/colab-axolotl-example.html - 2025-03-21T14:17:36.997Z + 2025-03-21T14:18:40.146Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/stepwise_supervised.html - 2025-03-21T14:17:36.993Z + 2025-03-21T14:18:40.142Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/template_free.html - 2025-03-21T14:17:36.993Z + 2025-03-21T14:18:40.142Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/tokenized.html - 2025-03-21T14:17:36.993Z + 2025-03-21T14:18:40.142Z https://axolotl-ai-cloud.github.io/axolotl/docs/nccl.html - 2025-03-21T14:17:36.996Z + 2025-03-21T14:18:40.145Z https://axolotl-ai-cloud.github.io/axolotl/docs/amd_hpc.html - 2025-03-21T14:17:36.992Z + 2025-03-21T14:18:40.141Z https://axolotl-ai-cloud.github.io/axolotl/docs/config.html - 2025-03-21T14:17:36.992Z + 2025-03-21T14:18:40.141Z https://axolotl-ai-cloud.github.io/axolotl/docs/multi-gpu.html - 2025-03-21T14:17:36.996Z + 2025-03-21T14:18:40.145Z https://axolotl-ai-cloud.github.io/axolotl/docs/installation.html - 2025-03-21T14:17:36.996Z + 2025-03-21T14:18:40.145Z https://axolotl-ai-cloud.github.io/axolotl/docs/torchao.html - 2025-03-21T14:17:36.996Z + 2025-03-21T14:18:40.146Z https://axolotl-ai-cloud.github.io/axolotl/docs/reward_modelling.html - 2025-03-21T14:17:36.996Z + 2025-03-21T14:18:40.145Z https://axolotl-ai-cloud.github.io/axolotl/docs/input_output.html - 2025-03-21T14:17:36.996Z + 2025-03-21T14:18:40.145Z https://axolotl-ai-cloud.github.io/axolotl/docs/multimodal.html - 2025-03-21T14:17:36.996Z + 2025-03-21T14:18:40.145Z https://axolotl-ai-cloud.github.io/axolotl/docs/getting-started.html - 2025-03-21T14:17:36.993Z + 2025-03-21T14:18:40.142Z https://axolotl-ai-cloud.github.io/axolotl/docs/inference.html - 2025-03-21T14:17:36.996Z + 2025-03-21T14:18:40.145Z https://axolotl-ai-cloud.github.io/axolotl/docs/multipack.html - 2025-03-21T14:17:36.996Z + 2025-03-21T14:18:40.145Z https://axolotl-ai-cloud.github.io/axolotl/docs/debugging.html - 2025-03-21T14:17:36.993Z + 2025-03-21T14:18:40.142Z https://axolotl-ai-cloud.github.io/axolotl/docs/lr_groups.html - 2025-03-21T14:17:36.996Z + 2025-03-21T14:18:40.145Z https://axolotl-ai-cloud.github.io/axolotl/TODO.html - 2025-03-21T14:17:36.991Z + 2025-03-21T14:18:40.140Z https://axolotl-ai-cloud.github.io/axolotl/src/axolotl/integrations/LICENSE.html - 2025-03-21T14:17:37.011Z + 2025-03-21T14:18:40.160Z https://axolotl-ai-cloud.github.io/axolotl/index.html - 2025-03-21T14:17:37.008Z + 2025-03-21T14:18:40.157Z https://axolotl-ai-cloud.github.io/axolotl/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html - 2025-03-21T14:17:37.012Z + 2025-03-21T14:18:40.160Z https://axolotl-ai-cloud.github.io/axolotl/FAQS.html - 2025-03-21T14:17:36.991Z + 2025-03-21T14:18:40.140Z https://axolotl-ai-cloud.github.io/axolotl/docs/multi-node.html - 2025-03-21T14:17:36.996Z + 2025-03-21T14:18:40.145Z https://axolotl-ai-cloud.github.io/axolotl/docs/faq.html - 2025-03-21T14:17:36.993Z + 2025-03-21T14:18:40.142Z https://axolotl-ai-cloud.github.io/axolotl/docs/batch_vs_grad.html - 
2025-03-21T14:17:36.992Z + 2025-03-21T14:18:40.141Z https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html - 2025-03-21T14:17:36.996Z + 2025-03-21T14:18:40.145Z https://axolotl-ai-cloud.github.io/axolotl/docs/rlhf.html - 2025-03-21T14:17:36.996Z + 2025-03-21T14:18:40.145Z https://axolotl-ai-cloud.github.io/axolotl/docs/cli.html - 2025-03-21T14:17:36.992Z + 2025-03-21T14:18:40.141Z https://axolotl-ai-cloud.github.io/axolotl/docs/unsloth.html - 2025-03-21T14:17:36.996Z + 2025-03-21T14:18:40.146Z https://axolotl-ai-cloud.github.io/axolotl/docs/fsdp_qlora.html - 2025-03-21T14:17:36.993Z + 2025-03-21T14:18:40.142Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset_preprocessing.html - 2025-03-21T14:17:36.993Z + 2025-03-21T14:18:40.142Z https://axolotl-ai-cloud.github.io/axolotl/docs/custom_integrations.html - 2025-03-21T14:17:36.992Z + 2025-03-21T14:18:40.141Z https://axolotl-ai-cloud.github.io/axolotl/docs/mac.html - 2025-03-21T14:17:36.996Z + 2025-03-21T14:18:40.145Z https://axolotl-ai-cloud.github.io/axolotl/docs/docker.html - 2025-03-21T14:17:36.993Z + 2025-03-21T14:18:40.142Z https://axolotl-ai-cloud.github.io/axolotl/docs/ray-integration.html - 2025-03-21T14:17:36.996Z + 2025-03-21T14:18:40.145Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/index.html - 2025-03-21T14:17:36.992Z + 2025-03-21T14:18:40.142Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/conversation.html - 2025-03-21T14:17:36.992Z + 2025-03-21T14:18:40.142Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/pretraining.html - 2025-03-21T14:17:36.993Z + 2025-03-21T14:18:40.142Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/inst_tune.html - 2025-03-21T14:17:36.992Z + 2025-03-21T14:18:40.142Z
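Editor's note: as a companion to the "Adding a new integration" search entry above, here is a minimal sketch of what such a plugin might look like. This is an illustrative assumption, not code from the repository: the hook names (get_input_args, pre_model_load), the pydantic args-model pattern, and the import path axolotl.integrations.base are the editor's reading of axolotl.integrations.BasePlugin and should be verified against src/axolotl/integrations/base.py and the cut_cross_entropy example; your_integration_name is the hypothetical placeholder already used in the docs.

# src/axolotl/integrations/your_integration_name/args.py  (hypothetical layout)
from typing import Optional
from pydantic import BaseModel

class YourIntegrationArgs(BaseModel):
    # Extra keys this plugin would read from the axolotl YAML config (illustrative name).
    your_integration_enabled: Optional[bool] = None

# src/axolotl/integrations/your_integration_name/__init__.py
from axolotl.integrations.base import BasePlugin  # import path assumed; docs reference axolotl.integrations.BasePlugin

class YourIntegrationPlugin(BasePlugin):
    def get_input_args(self):
        # Assumed hook: tell axolotl where to find the pydantic args model from args.py.
        return "axolotl.integrations.your_integration_name.args.YourIntegrationArgs"

    def pre_model_load(self, cfg):
        # Assumed hook: apply patches or other setup before the base model is loaded.
        pass

The plugin would then be enabled through the plugins list shown in the Warning callout above (plugins: - axolotl.integrations.your_integration_name.YourIntegrationPlugin).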