diff --git a/.github/workflows/preview-docs.yml b/.github/workflows/preview-docs.yml index 5af70b0dc..f93cfa660 100644 --- a/.github/workflows/preview-docs.yml +++ b/.github/workflows/preview-docs.yml @@ -8,7 +8,9 @@ on: paths: - '**/*.md' # any Markdown file - '**/*.qmd' # any Quarto file - - '_quarto.yaml' + - '_quarto.yml' + - docs/scripts/generate_config_docs.py + - src/axolotl/utils/schemas/**.py permissions: checks: write @@ -38,7 +40,7 @@ jobs: - name: Install dependencies run: | python3 -m pip install jupyter quartodoc - python3 -m pip install -e . --no-deps + python3 -m pip install -e . - name: Build autodoc run: quartodoc build diff --git a/.runpod/README.md b/.runpod/README.md index a631c3937..60c661eef 100644 --- a/.runpod/README.md +++ b/.runpod/README.md @@ -328,7 +328,7 @@ The following optimizers are supported: - Use `gradient_checkpointing: true` to reduce memory usage - Adjust `micro_batch_size` and `gradient_accumulation_steps` based on your GPU memory -For more detailed information, please refer to the [documentation](https://axolotl-ai-cloud.github.io/axolotl/docs/config.html). +For more detailed information, please refer to the [documentation](https://axolotl-ai-cloud.github.io/axolotl/docs/config-reference.html). ### Errors: diff --git a/README.md b/README.md index ef5523898..3bfce8df1 100644 --- a/README.md +++ b/README.md @@ -89,7 +89,7 @@ That's it! Check out our [Getting Started Guide](https://docs.axolotl.ai/docs/ge ## 📚 Documentation - [Installation Options](https://docs.axolotl.ai/docs/installation.html) - Detailed setup instructions for different environments -- [Configuration Guide](https://docs.axolotl.ai/docs/config.html) - Full configuration options and examples +- [Configuration Guide](https://docs.axolotl.ai/docs/config-reference.html) - Full configuration options and examples - [Dataset Loading](https://docs.axolotl.ai/docs/dataset_loading.html) - Loading datasets from various sources - [Dataset Guide](https://docs.axolotl.ai/docs/dataset-formats/) - Supported formats and how to use them - [Multi-GPU Training](https://docs.axolotl.ai/docs/multi-gpu.html) diff --git a/_quarto.yml b/_quarto.yml index 9b97095ce..93141aa9e 100644 --- a/_quarto.yml +++ b/_quarto.yml @@ -1,5 +1,6 @@ project: type: website + pre-render: docs/scripts/generate_config_docs.py quartodoc: dir: docs/api @@ -235,7 +236,7 @@ website: - docs/installation.qmd - docs/inference.qmd - docs/cli.qmd - - docs/config.qmd + - docs/config-reference.qmd - text: "API Reference" href: docs/api diff --git a/docs/.gitignore b/docs/.gitignore index 6c3cb2070..89407326f 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -2,3 +2,4 @@ _site/ /api/*.qmd /api/*.html +config-reference.qmd diff --git a/docs/config.qmd b/docs/config.qmd deleted file mode 100644 index d146b4c84..000000000 --- a/docs/config.qmd +++ /dev/null @@ -1,801 +0,0 @@ ---- -title: Config Reference -description: A complete list of all configuration options. 
---- - -```yaml -# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files -# This can also be a relative path to a model on disk -base_model: ./llama-7b-hf -# You can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, etc) -base_model_ignore_patterns: -# If the base_model repo on hf hub doesn't include configuration .json files, -# You can set that here, or leave this empty to default to base_model -base_model_config: ./llama-7b-hf -# You can specify to choose a specific model revision from huggingface hub -revision_of_model: -# Optional tokenizer configuration path in case you want to use a different tokenizer -# than the one defined in the base model -tokenizer_config: -# If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too -model_type: AutoModelForCausalLM -# Corresponding tokenizer for the model AutoTokenizer is a good choice -tokenizer_type: AutoTokenizer -# Trust remote code for untrusted source -trust_remote_code: -# use_fast option for tokenizer loading from_pretrained, default to True -tokenizer_use_fast: -# Whether to use the legacy tokenizer setting, defaults to True -tokenizer_legacy: -# Whether to use mistral-common tokenizer. If set to True, it will use the mistral-common tokenizer. -tokenizer_use_mistral_common: -# Resize the model embeddings when new tokens are added to multiples of 32 -# This is reported to improve training speed on some models -resize_token_embeddings_to_32x: -# Optional[bool] Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink. -shrink_embeddings: -# Optional[bool] Don't upcast the embeddings to float32 when using PEFT. Useful for low-VRAM GPUs -embeddings_skip_upcast: -# Whether to load the model with randomly initialized weights. Useful for -# pre-training a model from scratch or debugging purposes. -random_init_weights: - -# (Internal use only) -# Used to identify which the model is based on -is_falcon_derived_model: -is_llama_derived_model: -is_qwen_derived_model: -# Please note that if you set this to true, `padding_side` will be set to "left" by default -is_mistral_derived_model: - -# optional overrides to the base model configuration -overrides_of_model_config: - # RoPE Scaling https://github.com/huggingface/transformers/pull/24653 - rope_scaling: - type: # linear | dynamic - factor: # float - -# optional overrides the base model loading from_pretrained -overrides_of_model_kwargs: - # use_cache: False - -# optional overrides to the bnb 4bit quantization configuration -# https://huggingface.co/docs/transformers/main/main_classes/quantization#transformers.BitsAndBytesConfig -bnb_config_kwargs: - # These are default values - llm_int8_has_fp16_weight: false - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: true - -# quantization aware training -qat: - activation_dtype: # Optional[str] = "int8". Fake quantization layout to use for activation quantization. Valid options are "int4" and "int8" - weight_dtype: # Optional[str] = "int8". Fake quantization layout to use for weight quantization. Valid options are "int4" and "int8" - group_size: # Optional[int] = 32. The number of elements in each group for per-group fake quantization - fake_quant_after_n_steps: # Optional[int] = None. The number of steps to apply fake quantization after - -# post-training quantization -quantization: - weight_dtype: # Optional[str] = "int8". Fake quantization layout to use for weight quantization. 
Valid options are uintX for X in [1, 2, 3, 4, 5, 6, 7], or int4, or int8 - activation_dtype: # Optional[str] = "int8". Fake quantization layout to use for activation quantization. Valid options are "int4" and "int8" - group_size: # Optional[int] = 32. The number of elements in each group for per-group fake quantization - quantize_embedding: # Optional[bool] = False. Whether to quantize the embedding layer. - - -# Whether you are training a 4-bit GPTQ quantized model -gptq: true - -# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer -load_in_8bit: true -# Use bitsandbytes 4 bit -load_in_4bit: - -# Use CUDA bf16 -bf16: true # bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection. require >=ampere -# Use CUDA fp16 -fp16: true -# Use CUDA tf32 -tf32: true # require >=ampere -# Note: if bf16 is set to 'auto', and fp16 is set to true, we will prefer the explict fp16 setting - -# No AMP (automatic mixed precision) -bfloat16: true # require >=ampere -float16: true - -# Limit the memory for all available GPUs to this amount (if an integer, expressed in gigabytes); default: unset -gpu_memory_limit: 20GiB -# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge -lora_on_cpu: true - -# List[str]. Add plugins to extend the pipeline. -# See `src/axolotl/integrations` for the available plugins or doc below for more details. -# https://docs.axolotl.ai/docs/custom_integrations.html -plugins: - # - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin - -# A list of one or more datasets to finetune the model with -# See https://docs.axolotl.ai/docs/dataset_loading.html for guide on loading datasets -# See https://docs.axolotl.ai/docs/dataset-formats/ for guide on dataset formats -datasets: - # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory - - path: vicgalle/alpaca-gpt4 - # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection] - type: alpaca # format | format: (chat/instruct) | .load_ - ds_type: # Optional[str] (json|arrow|parquet|text|csv) defines the datatype when path is a file - data_files: # Optional[str] path to source data files - - shards: # Optional[int] split dataset into N pieces (use with shards_idx) - shards_idx: # Optional[int] = 0 the index of sharded dataset to use - - preprocess_shards: # Optional[int] process dataset in N sequential chunks for memory efficiency (exclusive with `shards`) - - name: # Optional[str] name of dataset configuration to load - split: train # Optional[str] name of dataset split to load from - revision: # Optional[str] The specific revision of the dataset to use when loading from the Hugging Face Hub. This can be a commit hash, tag, or branch name. If not specified, the latest version will be used. This parameter is ignored for local datasets. - trust_remote_code: # Optional[bool] Trust remote code for untrusted source - - # Custom user instruction prompt - - path: repo - type: - # The below are defaults. only set what's needed if you use a different column name. 
- system_prompt: "" - system_format: "{system}" - field_system: system - field_instruction: instruction - field_input: input - field_output: output - - # Customizable to be single line or multi-line - # Use {instruction}/{input} as key to be replaced - # 'format' can include {input} - format: |- - User: {instruction} {input} - Assistant: - # 'no_input_format' cannot include {input} - no_input_format: "{instruction} " - - # For `completion` datsets only, uses the provided field instead of `text` column - field: - - # Using chat template - - path: ... - # Set type to `chat_template` to use this strategy - type: chat_template - # Specify the name of the chat template to use - # The name of the chat template to use for training, following values are supported: - # - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default. - # - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py - # - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to if the tokenizer does not have a chat template else default to tokenizer. E.g. tokenizer_default_fallback_chatml. - # - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field. - chat_template: tokenizer_default - - # Custom jinja chat template. Used only if `chat_template: jinja` or empty. - chat_template_jinja: - - # Key containing the messages (default: "messages") - field_messages: messages - - # Key containing the tools (default: "tools") - # Must be a list[dict] and follow [JSON schema](https://json-schema.org/learn/getting-started-step-by-step). - field_tools: tools - - # Key containing the system message (default: "system") - # If the system message is not present in the dataset sample, it will be loaded from the field_system property. - field_system: system - - # Mapping of properties from the input dataset to the chat template. - # (default: message_property_mappings={'role':'role', 'content':'content'}) - # If a property exists in the template but not in this mapping, the system will attempt - # to load it directly from the message using the property name as the key. - # Example: In the mapping below, 'from' is loaded from input dataset and used as 'role', - # while 'value' is loaded and used as 'content' in the chat template. - message_property_mappings: - role: from - content: value - # ... - - # Optional[Dict[str, List]]. Roles mapping in the messages. - # The format is {target_role: [source_roles]}. All source roles will be mapped to the target role. - # The default is: - roles: - user: ["human", "user"] - assistant: ["gpt", "assistant"] - system: ["system"] - tool: ["tool"] - - # Optional[bool]. Whether to drop the system turn from the dataset. Only works with chat_template. - # This does not drop the default system message from chat_template if it exists. If you wish to, - # we recommend using a custom jinja template with the default system message removed or - # adding a system turn with empty content. - drop_system_message: - - # Optional[bool]. 
(for Qwen3 template only) Whether to split the assistant content based on a reasoning trace inside delimited tags - # See example at `docs/dataset-formats/conversation.qmd` - split_thinking: - - # IMPORTANT: The following fields determine which parts of the conversation to train on. - # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train - # See examples at `docs/dataset-formats/conversation.qmd` - # Note: If the below 5 fields are empty, defaults to training only on the last message. - - # Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss. - roles_to_train: ["assistant"] # default - # Optional[str]. Which EOS tokens to train on in the conversation. Possible values are: - # - all: train on all EOS tokens - # - turn (default): train on the EOS token at the end of each trainable turn - # - last: train on the last EOS token in the conversation - # TIP: Please make sure that your `tokenizer.eos_token` is same as EOS/EOT token in template. Otherwise, set `eos_token` under `special_tokens`. - train_on_eos: turn - # Optional[str]. Which EOT (End-of-Turn) tokens to train on in the conversation. Possible values are: - # - all: train on all EOT tokens - # - turn: train on the EOT token at the end of each trainable turn - # - last: train on the last EOT token in the conversation - # If not specified, defaults to the value of train_on_eos for backward compatibility. - train_on_eot: - # The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`. - message_field_training: training - # The key in the message turn that contains the training details. Useful to selectively train on certain tokens in a turn. - # The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train). - message_field_training_detail: train_detail - - -# If false, the datasets will not be shuffled and will keep their original order in `datasets`. -# The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true. -shuffle_merged_datasets: true - -# Deduplicates datasets and test_datasets with identical entries. -dataset_exact_deduplication: true - -# A list of one or more datasets to eval the model with. -# You can use either test_datasets, or val_set_size, but not both. -test_datasets: - - path: /workspace/data/eval.jsonl - ds_type: json - # You need to specify a split. For "json" datasets the default split is called "train". - split: train - type: completion - data_files: - - /workspace/data/eval.jsonl - -# use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo' -rl: -rl_beta: # Optional[float]. The beta parameter for the RL training. - -# dpo -dpo_use_weighting: # Optional[bool]. Whether to perform weighting. -rpo_alpha: # Optional[float]. Weighting of NLL term in loss from RPO paper. - -# orpo -orpo_alpha: 0.1 # Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to `beta` in `ORPOConfig` due to trl mapping. - -# kto -kto_desirable_weight: # Optional[float]. Factor for desirable loss term in KTO loss. -kto_undesirable_weight: # Optional[float]. Factor for undesirable loss term in KTO loss. 
- -# simpo -cpo_alpha: 1.0 # Weight of the BC regularizer -simpo_gamma: 0.5 # Target reward margin for the SimPO loss - -# grpo -trl: - use_vllm: # Optional[bool]. Whether to use VLLM for RL training. - vllm_server_host: # Optional[str]. Host of the vLLM server to connect to. - vllm_server_port: # Optional[int]. Port of the vLLM server to connect to. - vllm_server_timeout: # Optional[int]. Total timeout (in seconds) to wait for the vLLM server to respond. - vllm_guided_decoding_regex: # Optional[str]. Regex for vLLM guided decoding. - - beta: # Optional[float]. Beta parameter for the RL training. Same as `rl_beta`. Use - max_completion_length: # Optional[int]. Maximum length of the completion for RL training. - - reward_funcs: # Optional[list[str]]. List of reward functions to load. Paths must be importable from current dir. - reward_weights: # Optional[list[float]]. List of reward weights for the reward functions. - - num_generations: # Optional[int]. Number of generations to sample. - log_completions: # Optional[bool]. Whether to log completions. - num_completions_to_print: # Optional[int]. Number of completions to print when log_completions is True. - - sync_ref_model: # Optional[bool]. Whether to sync the reference model. - ref_model_mixup_alpha: # Optional[float]. Mixup alpha for the reference model. - ref_model_sync_steps: # Optional[int]. Sync steps for the reference model. - scale_rewards: # Optional[bool]. Whether to scale rewards by their standard deviation. - - temperature: # Optional[float]. Sampling temperature for the GRPO policy. - top_p: # Optional[float]. Top-p sampling probability for the generation policy. - top_k: # Optional[int]. Top-k sampling for the generation policy. - min_p: # Optional[float]. Minimum probability for the generation policy. - repetition_penalty: # Optional[float]. Penalty for tokens that appear in prompt and generated text. - - num_iterations: # Optional[int]. Number of iterations per batch (μ) for GRPO. - epsilon: # Optional[float]. Epsilon value for clipping in the GRPO algorithm. - epsilon_high: # Optional[float]. Upper-bound epsilon value for clipping in the GRPO algorithm. - use_liger_loss: # Optional[bool]. Whether to use Liger loss for GRPO. - loss_type: # Optional[str]. Loss formulation to use. Supported values: grpo, bnpo, dr_grpo. - mask_truncated_completions: # Optional[bool]. Whether to exclude truncated completions from loss calculation. - - -# reward modelling: `True` or `False` -reward_model: - -# process reward modelling: `True` or `False` -process_reward_model: - -# The name of the chat template to use for training, following values are supported: -# - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default value. -# - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py -# - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not available in the tokenizer. -# - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field. 
-# The selected chat template will be saved to the tokenizer_config.json for easier inferencing -# Note: It is recommended to set train_on_inputs to true when using a chat template that is different from the model's default chat template. -chat_template: tokenizer_default -# custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null. -chat_template_jinja: null -# Optional[List[str]]. Custom EOT (End-of-Turn) tokens to mask/unmask during training. -# These tokens mark the boundaries between conversation turns. -# For example: ["/INST", "", "[/SYSTEM_PROMPT]"] -# If not specified, defaults to just the model's eos_token. -# This is useful for templates that use multiple delimiter tokens. -eot_tokens: - # - "" - # - "[/INST]" - # - "[/SYSTEM_PROMPT]" -# Changes the default system message -default_system_message: You are a helpful assistant. Please give a long and detailed answer. # Currently only supports chatml. -# Axolotl attempts to save the dataset as an arrow after packing the data together so -# subsequent training attempts load faster, relative path -dataset_prepared_path: data/last_run_prepared -# Push prepared dataset to hub -push_dataset_to_hub: # Optional[str] repo_org/repo_name -# The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()` -# if not set. -dataset_processes: # defaults to os.cpu_count() if not set -# Keep dataset in memory while preprocessing -# Only needed if cached dataset is taking too much storage -dataset_keep_in_memory: -# push checkpoints to hub -hub_model_id: # private repo path to push finetuned model -# how to push checkpoints to hub -# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy -hub_strategy: -# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets -# Required to be true when used in combination with `push_dataset_to_hub` -hf_use_auth_token: # boolean -# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval. -val_set_size: 0.04 -# Num shards for whole dataset -dataset_shard_num: -# Index of shard to use for whole dataset -dataset_shard_idx: - -# The maximum length of an input to train with, this should typically be less than 2048 -# as most models have a token/context limit of 2048 -sequence_len: 2048 -# Pad inputs so each step uses constant sized buffers -# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently -pad_to_sequence_len: -# Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true' -sample_packing: -# Set to 'false' if getting errors during eval with sample_packing on. -eval_sample_packing: -# You can set these packing optimizations AFTER starting a training at least once. -# The trainer will provide recommended values for these values. -sample_packing_eff_est: -total_num_tokens: -# Increasing the following values helps with packing, but usually only slightly (<%1.) -# The number of samples packed at a time. -sample_packing_group_size: 100000 -# The number of samples which can be packed into one sequence. Increase if using a large sequence_len with many short samples. -sample_packing_bin_size: 200 -sample_pack_sequentially: # Optional[bool]. Whether to pack samples sequentially. 
- -# whether to concatenate samples during pretraining -pretraining_sample_concatenation: - -curriculum_sampling: # Optional[bool]. Whether to use sequential sampling for curriculum learning - -# Use batch flattening for speedups when not using sample_packing -batch_flattening: - -# Passed through to transformers when loading the model when launched without accelerate -# Use `sequential` when training w/ model parallelism to limit memory -device_map: -# Defines the max memory usage per gpu on the system. Passed through to transformers when loading the model. -max_memory: - -# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model -adapter: lora -# If you already have a lora model trained that you want to load, put that here. -# This means after training, if you want to test the model, you should set this to the value of `output_dir`. -# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`. -lora_model_dir: - -# LoRA hyperparameters -# For more details about the following options, see: -# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2 -lora_r: 8 -lora_alpha: 16 -lora_dropout: 0.05 -lora_target_modules: - - q_proj - - v_proj -# - k_proj -# - o_proj -# - gate_proj -# - down_proj -# - up_proj -lora_target_linear: # If true, will target all linear modules - -# List[int] | int. # The layer indices to transform, otherwise, apply to all layers -# https://huggingface.co/docs/peft/v0.15.0/en/package_reference/lora#peft.LoraConfig.layers_to_transform -peft_layers_to_transform: - -# Optional[bool]. Whether to use DoRA. -# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#weight-decomposed-low-rank-adaptation-dora -peft_use_dora: - -# Optional[bool]. Whether to use RSLoRA. -# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#rank-stabilized-lora -peft_use_rslora: - -# Optional[list[tuple[int, int]]]. List of layer indices to replicate. -# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#memory-efficient-layer-replication-with-lora -peft_layer_replication: - -# bool | Literal["gaussian", "eva", "olora", "pissa", "pissa_niter_[number of iters]", "corda", "loftq"] -# How to initialize LoRA weights. Default to True which is MS original implementation. -# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#initialization -peft_init_lora_weights: - -# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens. -# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models. -# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities. -# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994 -lora_modules_to_save: -# - embed_tokens -# - lm_head - -lora_fan_in_fan_out: false - -# Apply custom LoRA autograd functions and activation function Triton kernels for -# speed and memory savings -# See: https://docs.axolotl.ai/docs/lora_optims.html -lora_mlp_kernel: true -lora_qkv_kernel: true -lora_o_kernel: true - -# LoRA+ hyperparameters -# For more details about the following options, see: -# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/train_builder.py` -loraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4. -loraplus_lr_embedding: # loraplus learning rate for lora embedding layers. 
Default value is 1e-6. - -peft: - # Configuration options for loftq initialization for LoRA - # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization - loftq_config: - loftq_bits: # typically 4 bits - -# ReLoRA configuration -# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed -relora_steps: # Number of steps per ReLoRA restart -relora_warmup_steps: # Number of per-restart warmup steps -relora_anneal_steps: # Number of anneal steps for each relora cycle -relora_prune_ratio: # threshold for optimizer magnitude when pruning -relora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings - -# wandb configuration if you're using it -# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`. -wandb_mode: # "offline" to save run metadata locally and not sync to the server, "disabled" to turn off wandb -wandb_project: # Your wandb project name -wandb_entity: # A wandb Team name if using a Team -wandb_watch: -wandb_name: # Set the name of your wandb run -wandb_run_id: # Set the ID of your wandb run -wandb_log_model: # "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only at the end of training - -# mlflow configuration if you're using it -mlflow_tracking_uri: # URI to mlflow -mlflow_experiment_name: # Your experiment name -mlflow_run_name: # Your run name -hf_mlflow_log_artifacts: # set to true to copy each saved checkpoint on each save to mlflow artifact registry - -# Comet configuration if you're using it -# Make sure your `COMET_API_KEY` environment variable is set (recommended) or you login to Comet with `comet login`. -# Check out our documentation for more details https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start -use_comet: # Enable or disable Comet integration. -comet_api_key: # API key for Comet. Recommended to set via `comet login`. -comet_workspace: # Workspace name in Comet. Defaults to the user's default workspace. -comet_project_name: # Project name in Comet. Defaults to Uncategorized. -comet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key. -comet_mode: # Create a new experiment ("create") or log to an existing one ("get"). Default ("get_or_create") auto-selects based on configuration. -comet_online: # Set to True to log data to Comet server, or False for offline storage. Default is True. -comet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details. - -# Tensorboard -use_tensorboard: # Optional[bool] - -# Where to save the full-finetuned model to -output_dir: ./completed-model - -# Whether to use torch.compile and which backend to use -# setting to `auto` will enable torch compile when torch>=2.5.1 -torch_compile: # Optional[Union[Literal["auto"], bool]] -torch_compile_backend: # Optional[str] -torch_compile_mode: # 'default' | 'reduce-overhead' | 'max-autotune' - -# Training hyperparameters - -# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps. -gradient_accumulation_steps: 1 -# The number of samples to include in each batch. This is the number of samples sent to each GPU. 
-# Batch size per gpu = micro_batch_size * gradient_accumulation_steps -micro_batch_size: 2 -eval_batch_size: -num_epochs: 4 -warmup_steps: 100 # cannot use with warmup_ratio -warmup_ratio: 0.05 # cannot use with warmup_steps -learning_rate: 0.00003 -lr_quadratic_warmup: -logging_steps: -eval_steps: # Leave empty to eval at each epoch, integer for every N steps. float for fraction of total steps -evals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps -eval_strategy: # Set to `"no"` to skip evaluation, `"epoch"` at end of each epoch, leave empty to infer from `eval_steps`. -save_strategy: # Set to `"no"` to skip checkpoint saves, `"epoch"` at end of each epoch, `"best"` when better result is achieved, leave empty to infer from `save_steps`. -save_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps -saves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps -save_total_limit: # Checkpoints saved at a time -save_only_model: # Save only the model weights, skipping the optimizer. Using this means you can't resume from checkpoints. -# Maximum number of iterations to train for. It precedes num_epochs which means that -# if both are set, num_epochs will not be guaranteed. -# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps -max_steps: - -# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time. -include_tokens_per_second: # Optional[bool] - -# whether to find batch size that fits in memory. Passed to underlying transformers Trainer -auto_find_batch_size: # Optional[bool] - -eval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0 -eval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128 -do_causal_lm_eval: # Whether to run causal language model evaluation for metrics in `eval_causal_lm_metrics`. -eval_causal_lm_metrics: # HF evaluate metrics used during evaluation. Default is ["sacrebleu", "comet", "ter", "chrf", "perplexity"] - -profiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir. - # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information - # snapshots can be visualized @ https://pytorch.org/memory_viz - -loss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training) -loss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3) - -# Save model as safetensors (require safetensors package). Default True -save_safetensors: - -# Whether to mask out or include the human's prompt from the training labels -train_on_inputs: false -# Group similarly sized data to minimize padding. -# May be slower to start, as it must download and sort the entire dataset. -# Note that training loss may have an oscillating pattern with this enabled. -group_by_length: false - -# Whether to use gradient checkpointing. Available options are: true, false, "offload", "offload_disk". 
-# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing -gradient_checkpointing: false -# additional kwargs to pass to the trainer for gradient checkpointing -# gradient_checkpointing_kwargs: -# use_reentrant: true - -# Stop training after this many evaluation losses have increased in a row -# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback -early_stopping_patience: 3 - -# Specify a scheduler and kwargs to use with the optimizer -# Valid values are driven by the Transformers SchedulerType class, see: -# https://github.com/huggingface/transformers/blob/5f4ecf2d9f867a1255131d2461d75793c0cf1db2/src/transformers/trainer_utils.py#L420 -# Valid values include -# - 'linear' -# - 'cosine' (default) -# - 'cosine_with_restarts' -# - 'polynomial' -# - 'constant' -# - 'constant_with_warmup' -# - 'inverse_sqrt' -# - 'reduce_lr_on_plateau' -# - 'cosine_with_min_lr' -# - 'warmup_stable_decay' - -# Additional schedulers include: -# - 'one_cycle' -# - 'rex' -lr_scheduler: -lr_scheduler_kwargs: -cosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr -cosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf) - -# For one_cycle optim -lr_div_factor: # Learning rate div factor - -# Specify optimizer -# Valid values are driven by the Transformers OptimizerNames class, see: -# https://github.com/huggingface/transformers/blob/cbf924b76c03828101a34069a96d209314114fd5/src/transformers/training_args.py#L144-L189 -# -# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of -# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. When in doubt, it is recommended to start with the optimizer used -# in the examples/ for your model and fine-tuning use case. -# -# Valid values for 'optimizer' include: -# - adamw_torch -# - adamw_torch_fused (default) -# - adamw_torch_xla -# - adamw_torch_npu_fused -# - adamw_apex_fused -# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1) -# - adafactor -# - adamw_anyprecision -# - adamw_torch_4bit -# - ademamix -# - sgd -# - adagrad -# - adamw_bnb_8bit -# - adamw_8bit # alias for adamw_bnb_8bit -# - ademamix_8bit -# - lion_8bit -# - lion_32bit -# - paged_adamw_32bit -# - paged_adamw_8bit -# - paged_ademamix_32bit -# - paged_ademamix_8bit -# - paged_lion_32bit -# - paged_lion_8bit -# - rmsprop -# - rmsprop_bnb -# - rmsprop_bnb_8bit -# - rmsprop_bnb_32bit -# - galore_adamw -# - galore_adamw_8bit -# - galore_adafactor -# - galore_adamw_layerwise -# - galore_adamw_8bit_layerwise -# - galore_adafactor_layerwise -# - lomo -# - adalomo -# - grokadamw -# - schedule_free_adamw -# - schedule_free_sgd -# - apollo_adamw -# - apollo_adamw_layerwise -# -# Additional custom optimizers include: -# - optimi_adamw -# - ao_adamw_8bit -# - ao_adamw_fp8 -# - came_pytorch -optimizer: -# Dictionary of arguments to pass to the optimizer -optim_args: -# For Galore Optimizers the following optim_args are available -# rank: # type: int -# update_proj_gap # type: int -# scale # type: float -# proj_type: # type: str, default = std - -# The target modules to optimize, i.e. 
the module names that you would like to train, right now this is used only for GaLore algorithm -optim_target_modules: -# - self_attn # for llama -# - mlp - -# Specify weight decay -weight_decay: -# adamw hyperparams -adam_beta1: -adam_beta2: -adam_beta3: # only used for CAME Optimizer -adam_epsilon: -adam_epsilon2: # only used for CAME Optimizer -# Gradient clipping max norm -max_grad_norm: - -# Augmentation techniques -# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings -# currently only supported on Llama and Mistral -neftune_noise_alpha: - -# Optional[bool]. Whether to bettertransformers -flash_optimum: - -# Note: Only one of the following attention patches can be used at a time. -# For example, if you set `xformers_attention` to `true`, do not set `flash_attention` to `true`. - -# Optional[bool]. Whether to use xformers attention patch https://github.com/facebookresearch/xformers: -xformers_attention: -# Optional[bool]. Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention: -flash_attention: -flash_attn_cross_entropy: # Optional[bool]. Whether to use flash-attention cross entropy implementation - advanced use only -flash_attn_rms_norm: # Optional[bool]. Whether to use flash-attention rms norm implementation - advanced use only -flash_attn_fuse_qkv: # Optional[bool]. Whether to fuse QKV into a single operation -flash_attn_fuse_mlp: # Optional[bool]. Whether to fuse part of the MLP into a single operation -# Optional[bool]. Whether to use scaled-dot-product attention -# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html -sdp_attention: -# Optional[bool]. Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf -s2_attention: - -# Optional[bool]. Whether to use low_cpu_mem_usage -low_cpu_mem_usage: -# Optional[str]. Resume from a specific checkpoint dir -resume_from_checkpoint: -# Optional[bool]. If resume_from_checkpoint isn't set and you simply want it to start where it left off. -# Be careful with this being turned on between different models. -auto_resume_from_checkpoints: false - -## Multimodal section -# int | tuple[int, int] | None . Size to resize images to, width x height. -# Will read from model/processor config if not set. -image_size: -# str. Algorithm to use for image resizing. "bilinear", "bicubic", "lanczos". Default is "bilinear". -image_resize_algorithm: 'bilinear' -## End of multimodal section - -# Don't mess with this, it's here for accelerate and torchrun -local_rank: - -# Add or change special tokens. -# If you add tokens here, you don't need to add them to the `tokens` list. -special_tokens: - # bos_token: "" - # eos_token: "" - # unk_token: "" - # pad_token: "[PAD]" - -# Optional[list[str]]. Add extra tokens to the tokenizer. -tokens: - # - "<|startoftext|>" - # - "<|endoftext|>" - -# Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer. -# Only works for tokens that are not part of the base vocab (aka are added_tokens). -# Can be checked if they exist in tokenizer.json added_tokens. -added_tokens_overrides: # Dict[int, str] -# 128041: "<|im_start|>" -# 128042: "<|im_end|>" - -# FSDP -fsdp: -fsdp_config: - -# Deepspeed config path. 
e.g., deepspeed_configs/zero3.json -deepspeed: - -# Advanced DDP Arguments -ddp_timeout: -ddp_bucket_cap_mb: -ddp_broadcast_buffers: - -# Sequence parallelism -# Set to a divisor of the number of GPUs available to split sequences into chunks of equal size. -# Use in long context training to prevent OOM when sequences cannot fit into a single GPU's VRAM. -# E.g., if 4 GPUs are available, set this value to 2 to split each sequence into two equal-sized -# subsequences, or set to 4 to split into four equal-sized subsequences. -# See https://docs.axolotl.ai/docs/sequence_parallelism.html for more details. -sequence_parallel_degree: -# Optional; strides across the key dimension. Larger values use more memory but should make training faster. -# Must evenly divide the number of KV heads in your model. -heads_k_stride: 1 -# One of "varlen_llama3", "batch_ring", "batch_zigzag", "batch_stripe". Defaults to "varlen_llama3" -# in the sample packing case, and "batch_ring" in the non-sample packing case. -ring_attn_func: - -# Path to torch distx for optim 'adamw_anyprecision' -torchdistx_path: - -# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize -pretraining_dataset: - -# Debug mode -debug: - -# Seed -seed: - -# Allow overwrite yml config using from cli -strict: -``` diff --git a/docs/dataset-formats/conversation.qmd b/docs/dataset-formats/conversation.qmd index 290841c08..d1fca9441 100644 --- a/docs/dataset-formats/conversation.qmd +++ b/docs/dataset-formats/conversation.qmd @@ -12,7 +12,7 @@ Chat Template strategy uses a jinja2 template that converts a list of messages i {"conversations": [{"role": "...", "content": "..."}]} ``` -See [configs](../config.qmd) for full configs and supported templates. +See [configs](../config-reference.qmd) for full configs and supported templates. ### Migrating from sharegpt @@ -130,13 +130,13 @@ datasets: ``` ::: {.callout-tip} -See [config documentation](../config.qmd) for detailed explanations of "turn", "last", and "all" options for training on tokens. +See [config documentation](../config-reference.qmd) for detailed explanations of "turn", "last", and "all" options for training on tokens. ::: ::: {.callout-note} Using `eot_tokens` requires each token that exists in `chat_template` to be a single token in the tokenizer. Otherwise, the tokenizer will split the token and cause unexpected behavior. -You can add those tokens as new tokens under `tokens: ` or (recommended) override unused added_tokens via `added_tokens_overrides: `. See [config](../config.qmd) for more details. +You can add those tokens as new tokens under `tokens: ` or (recommended) override unused added_tokens via `added_tokens_overrides: `. See [config](../config-reference.qmd) for more details. ::: - Continuing from the previous example, if you want to train on all EOT token trainable turns but only last EOS token, set `train_on_eos: last`. diff --git a/docs/dataset-formats/inst_tune.qmd b/docs/dataset-formats/inst_tune.qmd index d89c6adaf..f5bd7ab8f 100644 --- a/docs/dataset-formats/inst_tune.qmd +++ b/docs/dataset-formats/inst_tune.qmd @@ -186,4 +186,4 @@ datasets: no_input_format: "[INST] {instruction} [/INST]" ``` -See full config options under [here](../config.qmd). +See full config options under [here](../config-reference.qmd). 
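The `eot_tokens` note in the `conversation.qmd` hunk above requires each end-of-turn delimiter to encode to a single token id. A minimal sketch of that check, assuming a locally available tokenizer (the model name below is illustrative and not part of this change):

```python
# Hedged sketch: verify that each eot_tokens candidate encodes to exactly one token id.
# Model name is illustrative only; substitute the tokenizer you are training with.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
for token in ["</s>", "[/INST]"]:
    ids = tokenizer.encode(token, add_special_tokens=False)
    # More than one id means the tokenizer splits the delimiter; add it under
    # `tokens:` or remap an unused slot via `added_tokens_overrides:` first.
    print(token, ids, "single token" if len(ids) == 1 else "needs tokens / added_tokens_overrides")
```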
diff --git a/docs/dataset_loading.qmd b/docs/dataset_loading.qmd index b78f86a98..bcffe7f0f 100644 --- a/docs/dataset_loading.qmd +++ b/docs/dataset_loading.qmd @@ -36,7 +36,7 @@ This matches the API of [`datasets.load_dataset`](https://github.com/huggingface For HuggingFace's guide to load different dataset types, see [here](https://huggingface.co/docs/datasets/loading). -For full details on the config, see [config.qmd](config.qmd). +For full details on the config, see [config-reference.qmd](config-reference.qmd). ::: {.callout-note} diff --git a/docs/getting-started.qmd b/docs/getting-started.qmd index 6f1b54348..de059c397 100644 --- a/docs/getting-started.qmd +++ b/docs/getting-started.qmd @@ -55,7 +55,7 @@ output_dir: ./outputs/lora-out - To perform QLoRA finetuning, replace with `load_in_4bit: true` and `adapter: qlora`. ::: -See our [Config options](config.qmd) for more details. +See our [config options](config-reference.qmd) for more details. ### Training {#sec-training} @@ -179,7 +179,7 @@ Now that you have the basics, you might want to: Check our other guides for details on these topics: -- [Configuration Guide](config.qmd) - Full configuration options +- [Configuration Guide](config-reference.qmd) - Full configuration options - [Dataset Loading](dataset_loading.qmd) - Loading datasets from various sources - [Dataset Formats](dataset-formats) - Working with different data formats - [Multi-GPU Training](multi-gpu.qmd) diff --git a/docs/quantize.qmd b/docs/quantize.qmd index 294efda8b..113fcafbe 100644 --- a/docs/quantize.qmd +++ b/docs/quantize.qmd @@ -32,7 +32,7 @@ output_dir: # The path to the output directory. Once quantization is complete, your quantized model will be saved in the `{output_dir}/quantized` directory. -You may also use the `quantize` command to quantize a model which has been trained with [QAT](./qat.md) - you can do this by using the existing QAT configuration file which +You may also use the `quantize` command to quantize a model which has been trained with [QAT](./qat.qmd) - you can do this by using the existing QAT configuration file which you used to train the model: ```yaml diff --git a/docs/scripts/generate_config_docs.py b/docs/scripts/generate_config_docs.py new file mode 100644 index 000000000..e22da7d05 --- /dev/null +++ b/docs/scripts/generate_config_docs.py @@ -0,0 +1,752 @@ +# type: ignore + +""" +Quarto documentation generation from Pydantic models. Uses Pydantic model source code +to automatically group fields, including inherited fields from parent classes. 
+""" + +import ast +import inspect +import textwrap +import types +import typing +from typing import Any, FrozenSet, Type, Union + +from pydantic import BaseModel + +from axolotl.utils.schemas.config import AxolotlInputConfig + + +class QuartoGenerator: + """Generate Quarto documentation from Pydantic models.""" + + def __init__(self): + self._class_fields_cache = {} + self._inheritance_map_cache = {} + self._nested_models_cache = {} + + def _get_direct_fields(self, cls: Type[BaseModel]) -> FrozenSet[str]: + """Get fields defined directly in a single class (not inherited).""" + if cls in self._class_fields_cache: + return self._class_fields_cache[cls] + + fields = set() + + # Get annotated fields + if hasattr(cls, "__annotations__"): + fields.update(cls.__annotations__.keys()) + + # Filter out private/special methods + fields = {f for f in fields if not f.startswith("_")} + + result = frozenset(fields) + self._class_fields_cache[cls] = result + return result + + def _is_pydantic_model(self, type_obj) -> bool: + """Check if a type is a Pydantic BaseModel.""" + return inspect.isclass(type_obj) and issubclass(type_obj, BaseModel) + + # pylint: disable=too-many-return-statements + def _extract_nested_type(self, field_type) -> Any: + """Extract the actual type from complex type annotations.""" + # Handle Annotated types (Python 3.9+) + if hasattr(typing, "get_origin") and hasattr(typing, "get_args"): + origin = typing.get_origin(field_type) + args = typing.get_args(field_type) + + if origin is not None: + # Handle Annotated[SomeType, ...] - extract the first argument + if hasattr(typing, "Annotated") and origin is typing.Annotated: + if args: + return self._extract_nested_type( + args[0] + ) # Recursively process the actual type + + # Handle list[SomeType], List[SomeType], etc. 
+ elif origin in (list, typing.List): + if args: + return self._extract_nested_type( + args[0] + ) # Extract element type + + # Handle Union types (including | syntax) + elif origin is typing.Union: + # Get non-None types from the Union + non_none_types = [arg for arg in args if arg is not type(None)] + if len(non_none_types) >= 1: + # Prioritize Pydantic models over primitive types + pydantic_models = [ + arg + for arg in non_none_types + if self._is_pydantic_model(arg) + ] + if pydantic_models: + # Return the first Pydantic model found + return self._extract_nested_type(pydantic_models[0]) + + # No Pydantic models, return the first non-None type + return self._extract_nested_type(non_none_types[0]) + + # Handle new Python 3.10+ union syntax (PeftConfig | None) + if hasattr(field_type, "__class__") and field_type.__class__ is types.UnionType: + # Get non-None types from the Union + non_none_types = [ + arg for arg in field_type.__args__ if arg is not type(None) + ] + if len(non_none_types) >= 1: + # Prioritize Pydantic models over primitive types + pydantic_models = [ + arg for arg in non_none_types if self._is_pydantic_model(arg) + ] + if pydantic_models: + return self._extract_nested_type(pydantic_models[0]) + return self._extract_nested_type(non_none_types[0]) + + # Handle old typing.Union syntax (fallback) + if hasattr(field_type, "__origin__"): + if field_type.__origin__ is Union: + # Get non-None types from the Union + non_none_types = [ + arg for arg in field_type.__args__ if arg is not type(None) + ] + if len(non_none_types) >= 1: + # Prioritize Pydantic models over primitive types + pydantic_models = [ + arg for arg in non_none_types if self._is_pydantic_model(arg) + ] + if pydantic_models: + return self._extract_nested_type(pydantic_models[0]) + return self._extract_nested_type(non_none_types[0]) + # Handle other generic types like dict[str, Any], etc. + elif hasattr(field_type, "__args__"): + return field_type + + return field_type + + # pylint: disable=too-many-return-statements + def _extract_all_pydantic_models_from_type( + self, field_type + ) -> list[type[BaseModel]]: + """Extract all Pydantic models from a type annotation, including from Unions.""" + models = [] + + if field_type is None: + return models + + # Handle Annotated types + if hasattr(typing, "get_origin") and hasattr(typing, "get_args"): + origin = typing.get_origin(field_type) + args = typing.get_args(field_type) + + if origin is not None: + # Handle Annotated[SomeType, ...] - extract from the first argument + if hasattr(typing, "Annotated") and origin is typing.Annotated: + if args: + models.extend( + self._extract_all_pydantic_models_from_type(args[0]) + ) + return models + + # Handle list[SomeType], List[SomeType], etc. 
+ if origin in (list, typing.List): + if args: + models.extend( + self._extract_all_pydantic_models_from_type(args[0]) + ) + return models + + # Handle Union types + if origin is typing.Union: + for arg in args: + if arg is not type(None): # Skip None type + models.extend( + self._extract_all_pydantic_models_from_type(arg) + ) + return models + + # Handle new Python 3.10+ union syntax + if hasattr(field_type, "__class__") and field_type.__class__ is types.UnionType: + for arg in field_type.__args__: + if arg is not type(None): # Skip None type + models.extend(self._extract_all_pydantic_models_from_type(arg)) + return models + + # Handle old typing.Union syntax (fallback) + if hasattr(field_type, "__origin__") and field_type.__origin__ is Union: + for arg in field_type.__args__: + if arg is not type(None): # Skip None type + models.extend(self._extract_all_pydantic_models_from_type(arg)) + return models + + # Check if this type itself is a Pydantic model + if self._is_pydantic_model(field_type): + models.append(field_type) + + return models + + def _get_nested_models( + self, model_class: type[BaseModel], visited=None + ) -> dict[str, type[BaseModel]]: + """Get all nested Pydantic models from a model class.""" + if visited is None: + visited = set() + + # Avoid infinite recursion + if model_class in visited: + return {} + + if model_class in self._nested_models_cache: + return self._nested_models_cache[model_class] + + visited.add(model_class) + nested_models = {} + + # Check all fields in the model + for field_info in model_class.model_fields.values(): + field_type = self._extract_nested_type(field_info.annotation) + + if self._is_pydantic_model(field_type): + nested_models[field_type.__name__] = field_type + # Recursively get nested models from this nested model + deeper_nested = self._get_nested_models(field_type, visited.copy()) + nested_models.update(deeper_nested) + + self._nested_models_cache[model_class] = nested_models + return nested_models + + def _build_inheritance_map(self, child_class: Type[BaseModel]): + """Build inheritance map for a class and all its parents.""" + if child_class in self._inheritance_map_cache: + return self._inheritance_map_cache[child_class] + + inheritance_map = {} + + # Get MRO and filter out BaseModel and object + mro_classes = [ + cls + for cls in child_class.__mro__ + if cls not in (BaseModel, object) and hasattr(cls, "__annotations__") + ] + + # Process each class in the MRO + for cls in mro_classes: + inheritance_map[cls] = self._get_direct_fields(cls) + + self._inheritance_map_cache[child_class] = inheritance_map + return inheritance_map + + def _wrap_comment(self, text: str, width: int = 88) -> list[str]: + """Wrap a comment to specified width, accounting for '# ' prefix.""" + if not text.strip(): + return ["#"] + + # Account for "# " prefix (2 characters) + content_width = width - 2 + wrapped_lines = textwrap.wrap(text, width=content_width) + return [f"# {line}" for line in wrapped_lines] + + def _extract_type_from_source( + self, model_class: type[BaseModel], field_name: str + ) -> str: + """Extract the actual type annotation text from source code, checking inheritance chain.""" + # Use inheritance map to check classes efficiently + inheritance_map = self._build_inheritance_map(model_class) + + # Check classes in MRO order + for cls in model_class.__mro__: + if cls in inheritance_map and field_name in inheritance_map[cls]: + type_annotation = self._get_type_from_class_source(cls, field_name) + if type_annotation != "unknown": + return 
type_annotation + + return "unknown" + + def _get_type_from_class_source(self, class_obj: type, field_name: str) -> str: + """Extract type annotation from a specific class's source code.""" + try: + source = inspect.getsource(class_obj) + tree = ast.parse(source) + except (OSError, TypeError): + return "unknown" + + # Find the class definition + for node in tree.body: + if isinstance(node, ast.ClassDef) and node.name == class_obj.__name__: + # Find the field assignment + for body_node in node.body: + if isinstance(body_node, ast.AnnAssign) and isinstance( + body_node.target, ast.Name + ): + if body_node.target.id == field_name and body_node.annotation: + return ast.unparse(body_node.annotation) + break + + return "unknown" + + def _extract_field_groups_from_all_classes( + self, model_class: type[BaseModel] + ) -> list[dict]: + """Extract field groups from all classes in the inheritance hierarchy.""" + all_groups = [] + inheritance_map = self._build_inheritance_map(model_class) + + # Get all Pydantic base classes in MRO order (most specific first) + # This puts AxolotlInputConfig fields first, then parent class fields + pydantic_classes = [ + cls + for cls in model_class.__mro__ + if cls in inheritance_map and inheritance_map[cls] + ] + + # Extract groups from each class + for cls in pydantic_classes: + class_groups = self._extract_field_groups_from_source(cls) + for group in class_groups: + all_groups.append(group) + + # If no groups found, create a default grouping by class + if not all_groups: + for cls in pydantic_classes: + fields_in_class = inheritance_map[cls] + if fields_in_class: + all_groups.append( + { + "fields": list(fields_in_class), + } + ) + + return all_groups + + # pylint: disable=too-many-return-statements + def _extract_field_groups_from_source( + self, model_class: type[BaseModel] + ) -> list[dict]: + """Extract field groups from source code based on blank lines and comments.""" + try: + source = inspect.getsource(model_class) + tree = ast.parse(source) + except (OSError, TypeError): + # Fallback if we can't get source code + fields_in_class = self._get_direct_fields(model_class) + if fields_in_class: + return [ + { + "fields": list(fields_in_class), + } + ] + return [] + + groups = [] + current_group_fields = [] + current_group_comment = None + + # Find the class definition + class_node = None + for node in ast.walk(tree): + if isinstance(node, ast.ClassDef) and node.name == model_class.__name__: + class_node = node + break + + if not class_node: + fields_in_class = self._get_direct_fields(model_class) + if fields_in_class: + return [ + { + "fields": list(fields_in_class), + } + ] + return [] + + # Parse the source lines to detect groupings + source_lines = source.split("\n") + + # Get fields that are actually defined in this specific class + fields_in_class = self._get_direct_fields(model_class) + + # Find assignments that correspond to model fields for THIS class only + field_assignments = [] + for node in class_node.body: + if isinstance(node, ast.AnnAssign) and isinstance(node.target, ast.Name): + field_name = node.target.id + if field_name in fields_in_class: + field_assignments.append( + { + "name": field_name, + "lineno": node.lineno, + "end_lineno": getattr(node, "end_lineno", node.lineno), + } + ) + + if not field_assignments: + if fields_in_class: + return [ + { + "fields": list(fields_in_class), + } + ] + return [] + + # Sort by line number + field_assignments.sort(key=lambda x: x["lineno"]) + + # Group fields based on blank lines and comments + for i, 
field_info in enumerate(field_assignments): + field_name = field_info["name"] + current_line = field_info["lineno"] + + # Check if this starts a new group (blank line before or significant gap) + is_new_group = False + + if i == 0: + is_new_group = True + else: + prev_end_line = field_assignments[i - 1]["end_lineno"] + + # Check for blank lines or comments between fields + lines_between = source_lines[prev_end_line : current_line - 1] + has_blank_line = any(line.strip() == "" for line in lines_between) + has_comment = any( + line.strip().startswith("#") for line in lines_between + ) + + # Start new group if there's a blank line or comment, or significant gap + if has_blank_line or has_comment or (current_line - prev_end_line > 3): + is_new_group = True + + if is_new_group and current_group_fields: + # Save the previous group + groups.append( + { + "fields": current_group_fields.copy(), + "description": current_group_comment, + } + ) + current_group_fields = [] + current_group_comment = None + + current_group_fields.append(field_name) + + # Add the final group + if current_group_fields: + groups.append( + { + "fields": current_group_fields, + "description": current_group_comment, + } + ) + + return groups + + def _generate_field_documentation( + self, + model_class: type[BaseModel], + field_name: str, + field_info: dict, + field_type_str: str, + is_required: bool, + indent_level: int = 0, + visited_models: set = None, + ) -> list[str]: + """Generate documentation for a single field, expanding nested models inline.""" + if visited_models is None: + visited_models = set() + + lines = [] + indent = " " * indent_level + + # Get the actual field type for nested model detection + if field_name in model_class.model_fields: + pydantic_field_info = model_class.model_fields[field_name] + actual_field_type = pydantic_field_info.annotation + else: + actual_field_type = None + + # Add description comment if available + description = field_info.get("description", "") + if description: + wrapped_lines = self._wrap_comment(description, width=88 - len(indent)) + for line in wrapped_lines: + lines.append(f"{indent}{line}") + + # Extract nested Pydantic models from the type annotation + nested_models = self._extract_all_pydantic_models_from_type(actual_field_type) + + # Filter out already visited models to prevent infinite recursion + expandable_models = [ + model for model in nested_models if model not in visited_models + ] + + if expandable_models: + # This field contains Pydantic models that can be expanded + + # Show the field with its full type annotation + field_line = f"{indent}{field_name}: {field_type_str}" + if field_info.get("default") is not None: + field_line += f" = {field_info['default']}" + if is_required: + field_line += " (required)" + lines.append(field_line) + + # Add to visited to prevent infinite recursion + new_visited = visited_models.copy() + new_visited.update(expandable_models) + + # Expand each nested Pydantic model + for i, nested_model in enumerate(expandable_models): + if i > 0: + lines.append("\n") + lines.append(f"{indent} # For {nested_model.__name__}:") + + # Get nested model schema + try: + nested_schema = nested_model.model_json_schema() + nested_properties = nested_schema.get("properties", {}) + nested_required = nested_schema.get("required", []) + except Exception: # pylint: disable=broad-exception-caught + # Fallback: use model fields directly + nested_properties = {} + nested_required = [] + for ( + nested_field_name, + nested_field_info, + ) in 
nested_model.model_fields.items(): + nested_description = "" + if ( + hasattr(nested_field_info, "json_schema_extra") + and nested_field_info.json_schema_extra + ): + nested_description = ( + nested_field_info.json_schema_extra.get( + "description", "" + ) + ) + elif ( + hasattr(nested_field_info, "description") + and nested_field_info.description + ): + nested_description = nested_field_info.description + + nested_default_val = None + if ( + hasattr(nested_field_info, "default") + and nested_field_info.default is not None + ): + if str(nested_field_info.default) != "PydanticUndefined": + nested_default_val = nested_field_info.default + + nested_properties[nested_field_name] = { + "type": "unknown", + "description": nested_description, + "default": nested_default_val, + } + + if nested_field_info.is_required(): + nested_required.append(nested_field_name) + + # Get field groups for the nested model + nested_field_groups = self._extract_field_groups_from_all_classes( + nested_model + ) + + # Generate nested fields with increased indentation + for i, group in enumerate(nested_field_groups): + if not group["fields"]: + continue + + # Add blank line between groups (except before first group) + if i > 0: + lines.append("") + + # Process nested fields + for nested_field_name in group["fields"]: + if nested_field_name not in nested_properties: + continue + + nested_field_info = nested_properties[nested_field_name] + nested_field_type = self._extract_type_from_source( + nested_model, nested_field_name + ) + nested_is_required = nested_field_name in nested_required + + # Recursively generate documentation for nested field + nested_lines = self._generate_field_documentation( + nested_model, + nested_field_name, + nested_field_info, + nested_field_type, + nested_is_required, + indent_level + 1, + new_visited, + ) + lines.extend(nested_lines) + else: + # Regular field (no expandable nested models) + field_line = f"{indent}{field_name}: {field_type_str}" + if field_info.get("default") is not None: + field_line += f" = {field_info['default']}" + if is_required: + field_line += " (required)" + lines.append(field_line) + + return lines + + def generate_qmd( + self, + model_class: type[BaseModel], + title: str | None = None, + expand_nested: bool = True, + ) -> str: + """Auto-generate config reference documentation including inherited fields.""" + + if title is None: + title = f"{model_class.__name__} Reference" + + # Try to get JSON schema, with fallback for serialization issues + try: + schema = model_class.model_json_schema() + properties = schema.get("properties", {}) + required = schema.get("required", []) + except Exception as e: # pylint: disable=broad-exception-caught + print( + f"Warning: Could not generate JSON schema ({e}). Using model fields instead." 
+ ) + # Fallback: use model fields directly + properties = {} + required = [] + for field_name, field_info in model_class.model_fields.items(): + # Extract description from json_schema_extra or field info + description = "" + if ( + hasattr(field_info, "json_schema_extra") + and field_info.json_schema_extra + ): + description = field_info.json_schema_extra.get("description", "") + elif hasattr(field_info, "description") and field_info.description: + description = field_info.description + + # Get default value + default_val = None + if hasattr(field_info, "default") and field_info.default is not None: + # Handle special Pydantic default markers + if str(field_info.default) != "PydanticUndefined": + default_val = field_info.default + + properties[field_name] = { + "type": "unknown", + "description": description, + "default": default_val, + } + + if field_info.is_required(): + required.append(field_name) + + # Extract field groups from all classes in inheritance hierarchy + field_groups = self._extract_field_groups_from_all_classes(model_class) + + # Start building QMD content + qmd_lines = [ + "---", + f"title: {title}", + "description: A complete list of all configuration options.", + "---", + "", + ] + + # Generate one big code block with all fields (inline nested expansion) + qmd_lines.append("```yaml") + + for i, group in enumerate(field_groups): + if not group["fields"]: + continue + + # Add blank line between groups (except before first group) + if i > 0: + qmd_lines.append("") + + # Process fields in the order they appear in source + for field_name in group["fields"]: + if field_name not in properties: + continue + + field_info = properties[field_name] + field_type = self._extract_type_from_source(model_class, field_name) + is_required = field_name in required + + if expand_nested: + # Check if this field has nested models + if field_name in model_class.model_fields: + pydantic_field_info = model_class.model_fields[field_name] + nested_models = self._extract_all_pydantic_models_from_type( + pydantic_field_info.annotation + ) + has_nested = bool(nested_models) + else: + has_nested = False + + # Add blank line before nested config + if has_nested: + qmd_lines.append("") + + # Use the new inline generation method + field_lines = self._generate_field_documentation( + model_class, + field_name, + field_info, + field_type, + is_required, + indent_level=0, + visited_models=set(), + ) + qmd_lines.extend(field_lines) + + # Add blank line after nested config + if has_nested: + qmd_lines.append("") + else: + # Original simple approach + description = field_info.get("description", "") + default = field_info.get("default") + + # Add wrapped comment for description + if description: + wrapped_lines = self._wrap_comment(description) + qmd_lines.extend(wrapped_lines) + + line = f"{field_name}: {field_type}" + if default is not None: + line += f" = {default}" + if is_required: + line += " (required)" + qmd_lines.append(line) + + qmd_lines.append("```") + + # Join all lines and clean up any double newlines + content = "\n".join(qmd_lines) + + # Replace multiple consecutive newlines with just two newlines (one blank line) + import re + + content = re.sub(r"\n{3,}", "\n\n", content) + + # Ensure single newline at the very end + content = content.rstrip("\n") + "\n" + + return content + + +def main(): + generator = QuartoGenerator() + + print("Generating config reference content...") + qmd_content = generator.generate_qmd(AxolotlInputConfig, "Config Reference", True) + + print("Writing to file...") + with 
open("docs/config-reference.qmd", "w", encoding="utf-8") as f: + f.write(qmd_content) + print("Done!") + + +if __name__ == "__main__": + main() diff --git a/src/axolotl/utils/schemas/config.py b/src/axolotl/utils/schemas/config.py index 33a8f77db..259daa56f 100644 --- a/src/axolotl/utils/schemas/config.py +++ b/src/axolotl/utils/schemas/config.py @@ -12,10 +12,8 @@ from pydantic import ( Field, StringConstraints, field_serializer, - field_validator, model_validator, ) -from transformers.utils.import_utils import is_torch_npu_available from axolotl.utils.logging import get_logger from axolotl.utils.schemas.datasets import ( @@ -47,14 +45,13 @@ from axolotl.utils.schemas.peft import LoraConfig, ReLoRAConfig from axolotl.utils.schemas.quantization import PTQConfig, QATConfig from axolotl.utils.schemas.training import HyperparametersConfig from axolotl.utils.schemas.trl import TRLConfig +from axolotl.utils.schemas.validation import ValidationMixin from axolotl.utils.schemas.vllm import VllmConfig LOG = get_logger(__name__, use_environ=True) -SUPPORTED_METRICS = {"sacrebleu", "comet", "ter", "chrf", "perplexity"} - -# pylint: disable=too-many-public-methods,too-many-ancestors +# pylint: disable=too-many-ancestors class AxolotlInputConfig( ModelInputConfig, ModelOutputConfig, @@ -70,22 +67,54 @@ class AxolotlInputConfig( MultiModalConfig, RemappedParameters, DeprecatedParameters, + ValidationMixin, BaseModel, ): - """Wrapper of all config options""" + """Wrapper of all config options.""" model_config = {"populate_by_name": True} - strict: bool | None = Field(default=False) - resume_from_checkpoint: str | None = None - auto_resume_from_checkpoints: bool | None = None - resize_token_embeddings_to_32x: bool | None = None + strict: bool | None = Field( + default=False, + json_schema_extra={"description": "Allow overwrite yml config using from cli"}, + ) + resume_from_checkpoint: str | None = Field( + default=None, + json_schema_extra={"description": "Resume from a specific checkpoint dir"}, + ) + auto_resume_from_checkpoints: bool | None = Field( + default=None, + json_schema_extra={ + "description": "If resume_from_checkpoint isn't set and you simply want it to start where it left off. Be careful with this being turned on between different models." + }, + ) + resize_token_embeddings_to_32x: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Resize the model embeddings when new tokens are added to multiples of 32. This is reported to improve training speed on some models" + }, + ) mean_resizing_embeddings: bool | None = False # optionally shrink the embeddings when the tokenizer vocab size is smaller - shrink_embeddings: bool | None = None - embeddings_skip_upcast: bool | None = None + shrink_embeddings: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink." + }, + ) + embeddings_skip_upcast: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Don't upcast the embeddings to float32 when using PEFT. 
Useful for low-VRAM GPUs" + }, + ) - rl: RLType | None = None + rl: RLType | None = Field( + default=None, + json_schema_extra={ + "description": "Use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo'" + }, + ) trl: TRLConfig | None = Field( default_factory=lambda: TRLConfig(), # pylint: disable=unnecessary-lambda ) @@ -94,12 +123,25 @@ class AxolotlInputConfig( ) qat: QATConfig | None = None quantization: PTQConfig | None = None - reward_model: bool | None = None - process_reward_model: bool | None = None + reward_model: bool | None = Field( + default=None, + json_schema_extra={"description": "Reward modelling: `True` or `False`"}, + ) + process_reward_model: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Process reward modelling: `True` or `False`" + }, + ) num_labels: int | None = None # Whether to use weighting in DPO trainer. # If `None`, default is `False` in the trainer. - dpo_use_weighting: bool | None = None + dpo_use_weighting: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Whether to perform weighting in DPO trainer" + }, + ) dpo_use_logits_to_keep: bool | None = None dpo_label_smoothing: float | None = None dpo_norm_loss: bool | None = None @@ -111,7 +153,12 @@ class AxolotlInputConfig( MinLen(1), ] | None - ) = None + ) = Field( + default=None, + json_schema_extra={ + "description": "A list of one or more datasets to finetune the model with" + }, + ) test_datasets: ( Annotated[ @@ -119,22 +166,59 @@ class AxolotlInputConfig( MinLen(1), ] | None - ) = None - shuffle_merged_datasets: bool | None = True - dataset_prepared_path: str | None = None - dataset_shard_num: int | None = None - dataset_shard_idx: int | None = None + ) = Field( + default=None, + json_schema_extra={ + "description": "A list of one or more datasets to eval the model with. You can use either test_datasets, or val_set_size, but not both." + }, + ) + shuffle_merged_datasets: bool | None = Field( + default=True, + json_schema_extra={ + "description": "If false, the datasets will not be shuffled and will keep their original order in `datasets`. The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true." + }, + ) + dataset_prepared_path: str | None = Field( + default=None, + json_schema_extra={ + "description": "Axolotl attempts to save the dataset as an arrow after packing the data together so subsequent training attempts load faster, relative path" + }, + ) + dataset_shard_num: int | None = Field( + default=None, json_schema_extra={"description": "Num shards for whole dataset"} + ) + dataset_shard_idx: int | None = Field( + default=None, + json_schema_extra={"description": "Index of shard to use for whole dataset"}, + ) skip_prepare_dataset: bool | None = False pretraining_dataset: ( Annotated[list[PretrainingDataset | SFTDataset], MinLen(1)] | None ) = Field( default=None, - json_schema_extra={"description": "streaming dataset to use for pretraining"}, + json_schema_extra={ + "description": "Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize" + }, + ) + dataset_processes: int | None = Field( + default=min(32, os.cpu_count()), # type: ignore[type-var] + json_schema_extra={ + "description": "The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()` if not set." 
+ }, + ) + dataset_exact_deduplication: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Deduplicates datasets and test_datasets with identical entries" + }, + ) + dataset_keep_in_memory: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Keep dataset in memory while preprocessing. Only needed if cached dataset is taking too much storage" + }, ) - dataset_processes: int | None = Field(default=min(32, os.cpu_count() or 1)) - dataset_exact_deduplication: bool | None = None - dataset_keep_in_memory: bool | None = None dataloader_pin_memory: bool | None = None dataloader_num_workers: int | None = None dataloader_prefetch_factor: int | None = None @@ -144,75 +228,203 @@ class AxolotlInputConfig( remove_unused_columns: bool | None = None - push_dataset_to_hub: str | None = None - hf_use_auth_token: bool | None = None + push_dataset_to_hub: str | None = Field( + default=None, + json_schema_extra={ + "description": "Push prepared dataset to hub - repo_org/repo_name" + }, + ) + hf_use_auth_token: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets. Required to be true when used in combination with `push_dataset_to_hub`" + }, + ) device: Any | None = None - device_map: Any | None = None + device_map: Any | None = Field( + default=None, + json_schema_extra={ + "description": "Passed through to transformers when loading the model when launched without accelerate. Use `sequential` when training w/ model parallelism to limit memory" + }, + ) world_size: int | None = None - local_rank: int | None = None + local_rank: int | None = Field( + default=None, + json_schema_extra={ + "description": "Don't mess with this, it's here for accelerate and torchrun" + }, + ) ddp: bool | None = None - seed: int | None = None - ddp_timeout: int | None = None - ddp_bucket_cap_mb: int | None = None - ddp_broadcast_buffers: bool | None = None + seed: int | None = Field( + default=None, json_schema_extra={"description": "Seed for reproducibility"} + ) + ddp_timeout: int | None = Field( + default=None, + json_schema_extra={"description": "Advanced DDP Arguments - timeout"}, + ) + ddp_bucket_cap_mb: int | None = Field( + default=None, + json_schema_extra={"description": "Advanced DDP Arguments - bucket cap in MB"}, + ) + ddp_broadcast_buffers: bool | None = Field( + default=None, + json_schema_extra={"description": "Advanced DDP Arguments - broadcast buffers"}, + ) ddp_find_unused_parameters: bool | None = None - eval_table_size: int | None = None - eval_max_new_tokens: int | None = None - do_causal_lm_eval: bool | None = None - eval_causal_lm_metrics: list[str] | None = None + eval_table_size: int | None = Field( + default=None, + json_schema_extra={ + "description": "Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0" + }, + ) + eval_max_new_tokens: int | None = Field( + default=None, + json_schema_extra={ + "description": "Total number of tokens generated for predictions sent to wandb. Default is 128" + }, + ) + do_causal_lm_eval: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Whether to run causal language model evaluation for metrics in `eval_causal_lm_metrics`" + }, + ) + eval_causal_lm_metrics: list[str] | None = Field( + default=None, + json_schema_extra={ + "description": "HF evaluate metrics used during evaluation. 
Default is ['sacrebleu', 'comet', 'ter', 'chrf', 'perplexity']" + }, + ) do_bench_eval: bool | None = None bench_dataset: str | None = None bench_split: str | None = None metric_for_best_model: str | None = None greater_is_better: bool | None = None - loss_watchdog_threshold: float | None = None - loss_watchdog_patience: int | None = None + loss_watchdog_threshold: float | None = Field( + default=None, + json_schema_extra={ + "description": "High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training)" + }, + ) + loss_watchdog_patience: int | None = Field( + default=None, + json_schema_extra={ + "description": "Number of high-loss steps in a row before the trainer aborts (default: 3)" + }, + ) gc_steps: int | None = None - bf16: Literal["auto"] | bool | None = "auto" - fp16: bool | None = None + bf16: Literal["auto"] | bool | None = Field( + default="auto", + json_schema_extra={ + "description": "Use CUDA bf16. bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection. require >=ampere" + }, + ) + fp16: bool | None = Field( + default=None, json_schema_extra={"description": "Use CUDA fp16"} + ) fp8: bool | None = None - bfloat16: bool | None = None # for non-AMP cases - float16: bool | None = None # for non-AMP cases - tf32: bool | None = None + bfloat16: bool | None = Field( + default=None, + json_schema_extra={ + "description": "No AMP (automatic mixed precision) - require >=ampere" + }, + ) # for non-AMP cases + float16: bool | None = Field( + default=None, + json_schema_extra={"description": "No AMP (automatic mixed precision)"}, + ) # for non-AMP cases + tf32: bool | None = Field( + default=None, + json_schema_extra={"description": "Use CUDA tf32 - require >=ampere"}, + ) float32: bool | None = None - # torch_dtype: torch.dtype | None - gradient_checkpointing: Literal["offload", "offload_disk"] | bool | None = Field( - default=False + default=False, + json_schema_extra={ + "description": "Whether to use gradient checkpointing. Available options are: true, false, 'offload', 'offload_disk'. https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing" + }, + ) + gradient_checkpointing_kwargs: dict[str, Any] | None = Field( + default=None, + json_schema_extra={ + "description": "Additional kwargs to pass to the trainer for gradient checkpointing" + }, ) - gradient_checkpointing_kwargs: dict[str, Any] | None = None unfrozen_parameters: list[str] | None = None - sequence_len: int = Field(default=512) + sequence_len: int = Field( + default=512, + json_schema_extra={ + "description": "The maximum length of an input to train with, this should typically be less than 2048 as most models have a token/context limit of 2048" + }, + ) min_sample_len: int | None = None max_prompt_len: int = Field( default=512, json_schema_extra={"description": "maximum prompt length for RL training"}, ) - sample_packing: bool | None = None - sample_packing_group_size: int | None = 100_000 - sample_packing_bin_size: int | None = 200 - sample_packing_sequentially: bool | None = None - eval_sample_packing: bool | None = None - pad_to_sequence_len: bool | None = None - curriculum_sampling: bool | None = None + sample_packing: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Use efficient multi-packing with block diagonal attention and per sequence position_ids. 
Recommend setting to 'true'"
+        },
+    )
+    sample_packing_group_size: int | None = Field(
+        default=100_000,
+        json_schema_extra={
+            "description": "The number of samples packed at a time. Increasing the following values helps with packing, but usually only slightly (<1%)."
+        },
+    )
+    sample_packing_bin_size: int | None = Field(
+        default=200,
+        json_schema_extra={
+            "description": "The number of samples which can be packed into one sequence. Increase if using a large sequence_len with many short samples."
+        },
+    )
+    sample_packing_sequentially: bool | None = Field(
+        default=None,
+        json_schema_extra={"description": "Whether to pack samples sequentially"},
+    )
+    eval_sample_packing: bool | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Set to 'false' if getting errors during eval with sample_packing on"
+        },
+    )
+    pad_to_sequence_len: bool | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Pad inputs so each step uses constant sized buffers. This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently"
+        },
+    )
+    curriculum_sampling: bool | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Whether to use sequential sampling for curriculum learning"
+        },
+    )
     multipack_real_batches: bool | None = None
     pretraining_sample_concatenation: bool | None = Field(
         default=None,
         json_schema_extra={
-            "description": "whether to soft pack/concatenate samples during pretraining",
+            "description": "whether to concatenate samples during pretraining",
         },
     )
-    batch_flattening: Literal["auto"] | bool | None = None
+    batch_flattening: Literal["auto"] | bool | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Use batch flattening for speedups when not using sample_packing"
+        },
+    )

     # for PoSE context length extension
     use_pose: bool | None = None
@@ -228,17 +440,60 @@ class AxolotlInputConfig(
         },
     )

-    xformers_attention: bool | None = None
-    sdp_attention: bool | None = None
-    s2_attention: bool | None = None
+    xformers_attention: bool | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Whether to use xformers attention patch https://github.com/facebookresearch/xformers"
+        },
+    )
+    sdp_attention: bool | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Whether to use scaled-dot-product attention https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html"
+        },
+    )
+    s2_attention: bool | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf"
+        },
+    )
     flex_attention: bool | None = None
     flex_attn_compile_kwargs: dict[str, Any] | None = None
-    flash_attention: bool | None = None
-    flash_attn_cross_entropy: bool | None = None
-    flash_attn_rms_norm: bool | None = None
-    flash_attn_fuse_qkv: bool | None = None
-    flash_attn_fuse_mlp: bool | None = None
-    flash_optimum: bool | None = None
+    flash_attention: bool | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention"
+        },
+    )
+    flash_attn_cross_entropy: bool | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Whether to use flash-attention cross entropy implementation - advanced use only"
+        },
+    )
+    flash_attn_rms_norm: bool | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Whether to use flash-attention rms norm implementation - advanced use only"
+        },
+    )
+ flash_attn_fuse_qkv: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Whether to fuse QKV into a single operation" + }, + ) + flash_attn_fuse_mlp: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Whether to fuse part of the MLP into a single operation" + }, + ) + flash_optimum: bool | None = Field( + default=None, + json_schema_extra={"description": "Whether to use bettertransformers"}, + ) eager_attention: bool | None = None @@ -249,76 +504,273 @@ class AxolotlInputConfig( unsloth_rms_norm: bool | None = None unsloth_rope: bool | None = None - lora_mlp_kernel: bool | None = None - lora_qkv_kernel: bool | None = None - lora_o_kernel: bool | None = None + lora_mlp_kernel: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Apply custom LoRA autograd functions and activation function Triton kernels for speed and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html" + }, + ) + lora_qkv_kernel: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Apply custom LoRA autograd functions and activation function Triton kernels for speed and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html" + }, + ) + lora_o_kernel: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Apply custom LoRA autograd functions and activation function Triton kernels for speed and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html" + }, + ) llama4_linearized_experts: bool | None = None - deepspeed: str | dict[str, Any] | None = None - fsdp: list[str] | None = None - fsdp_config: dict[str, Any] | None = None + deepspeed: str | dict[str, Any] | None = Field( + default=None, + json_schema_extra={ + "description": "Deepspeed config path. e.g., deepspeed_configs/zero3.json" + }, + ) + fsdp: list[str] | None = Field( + default=None, json_schema_extra={"description": "FSDP configuration"} + ) + fsdp_config: dict[str, Any] | None = Field( + default=None, json_schema_extra={"description": "FSDP configuration options"} + ) fsdp_final_state_dict_type: ( Literal["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"] | None ) = None - val_set_size: float | None = Field(default=0.0) + val_set_size: float | None = Field( + default=0.0, + json_schema_extra={ + "description": "How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval." + }, + ) - sequence_parallel_degree: int | None = None - heads_k_stride: int | None = None - ring_attn_func: RingAttnFunc | None = None + sequence_parallel_degree: int | None = Field( + default=None, + json_schema_extra={ + "description": "Set to a divisor of the number of GPUs available to split sequences into chunks of equal size. Use in long context training to prevent OOM when sequences cannot fit into a single GPU's VRAM. E.g., if 4 GPUs are available, set this value to 2 to split each sequence into two equal-sized subsequences, or set to 4 to split into four equal-sized subsequences. See https://docs.axolotl.ai/docs/sequence_parallelism.html for more details." + }, + ) + heads_k_stride: int | None = Field( + default=None, + json_schema_extra={ + "description": "Optional; strides across the key dimension. Larger values use more memory but should make training faster. Must evenly divide the number of KV heads in your model." 
+ }, + ) + ring_attn_func: RingAttnFunc | None = Field( + default=None, + json_schema_extra={ + "description": "One of 'varlen_llama3', 'batch_ring', 'batch_zigzag', 'batch_stripe'. Defaults to 'varlen_llama3' in the sample packing case, and 'batch_ring' in the non-sample packing case." + }, + ) - special_tokens: SpecialTokensConfig | None = None - tokens: list[str] | None = None - added_tokens_overrides: dict[int, str] | None = None + special_tokens: SpecialTokensConfig | None = Field( + default=None, + json_schema_extra={ + "description": "Add or change special tokens. If you add tokens here, you don't need to add them to the `tokens` list." + }, + ) + tokens: list[str] | None = Field( + default=None, + json_schema_extra={"description": "Add extra tokens to the tokenizer"}, + ) + added_tokens_overrides: dict[int, str] | None = Field( + default=None, + json_schema_extra={ + "description": "Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer. Only works for tokens that are not part of the base vocab (aka are added_tokens). Can be checked if they exist in tokenizer.json added_tokens." + }, + ) - torch_compile: Literal["auto"] | bool | None = None - torch_compile_backend: str | None = None + torch_compile: Literal["auto"] | bool | None = Field( + default=None, + json_schema_extra={ + "description": "Whether to use torch.compile and which backend to use. setting to `auto` will enable torch compile when torch>=2.5.1" + }, + ) + torch_compile_backend: str | None = Field( + default=None, + json_schema_extra={"description": "Backend to use for torch.compile"}, + ) torch_compile_mode: Literal["default", "reduce-overhead", "max-autotune"] | None = ( None ) - max_steps: int | None = None - warmup_steps: int | None = None - warmup_ratio: float | None = None - eval_steps: int | float | None = None - evals_per_epoch: int | None = None - eval_strategy: str | None = None - save_steps: int | float | None = None - saves_per_epoch: int | None = None - save_strategy: str | None = None - save_total_limit: int | None = None - logging_steps: int | None = None - early_stopping_patience: int | None = None + max_steps: int | None = Field( + default=None, + json_schema_extra={ + "description": "Maximum number of iterations to train for. It precedes num_epochs which means that if both are set, num_epochs will not be guaranteed. e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps" + }, + ) + warmup_steps: int | None = Field( + default=None, + json_schema_extra={ + "description": "Number of warmup steps. Cannot use with warmup_ratio" + }, + ) + warmup_ratio: float | None = Field( + default=None, + json_schema_extra={"description": "Warmup ratio. Cannot use with warmup_steps"}, + ) + eval_steps: int | float | None = Field( + default=None, + json_schema_extra={ + "description": "Leave empty to eval at each epoch, integer for every N steps. float for fraction of total steps" + }, + ) + evals_per_epoch: int | None = Field( + default=None, + json_schema_extra={ + "description": "Number of times per epoch to run evals, mutually exclusive with eval_steps" + }, + ) + eval_strategy: str | None = Field( + default=None, + json_schema_extra={ + "description": "Set to `no` to skip evaluation, `epoch` at end of each epoch, leave empty to infer from `eval_steps`" + }, + ) + save_steps: int | float | None = Field( + default=None, + json_schema_extra={ + "description": "Leave empty to save at each epoch, integer for every N steps. 
float for fraction of total steps"
+        },
+    )
+    saves_per_epoch: int | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Number of times per epoch to save a checkpoint, mutually exclusive with save_steps"
+        },
+    )
+    save_strategy: str | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Set to `no` to skip checkpoint saves, `epoch` at end of each epoch, `best` when better result is achieved, leave empty to infer from `save_steps`"
+        },
+    )
+    save_total_limit: int | None = Field(
+        default=None, json_schema_extra={"description": "Checkpoints saved at a time"}
+    )
+    logging_steps: int | None = Field(
+        default=None, json_schema_extra={"description": "Logging frequency"}
+    )
+    early_stopping_patience: int | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Stop training after this many evaluation losses have increased in a row. https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback"
+        },
+    )
     load_best_model_at_end: bool | None = False
-    save_only_model: bool | None = False
-    use_tensorboard: bool | None = None
-    profiler_steps: int | None = None
-    include_tokens_per_second: bool | None = None
+    save_only_model: bool | None = Field(
+        default=False,
+        json_schema_extra={
+            "description": "Save only the model weights, skipping the optimizer. Using this means you can't resume from checkpoints."
+        },
+    )
+    use_tensorboard: bool | None = Field(
+        default=None, json_schema_extra={"description": "Use tensorboard for logging"}
+    )
+    profiler_steps: int | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Enable the pytorch profiler to capture the first N steps of training to the output_dir. See https://pytorch.org/blog/understanding-gpu-memory-1/ for more information. Snapshots can be visualized @ https://pytorch.org/memory_viz"
+        },
+    )
+    include_tokens_per_second: bool | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Whether to include tokens trained per second in the training metrics. This iterates over the entire dataset once, so it takes some time."
+        },
+    )

-    neftune_noise_alpha: float | None = None
+    neftune_noise_alpha: float | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings. Currently only supported on Llama and Mistral"
+        },
+    )

-    orpo_alpha: float | None = None
-    rpo_alpha: float | None = None
-    simpo_gamma: float | None = None
-    cpo_alpha: float | None = None
+    orpo_alpha: float | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to `beta` in `ORPOConfig` due to trl mapping."
+ }, + ) + rpo_alpha: float | None = Field( + default=None, + json_schema_extra={ + "description": "Weighting of NLL term in loss from RPO paper" + }, + ) + simpo_gamma: float | None = Field( + default=None, + json_schema_extra={"description": "Target reward margin for the SimPO loss"}, + ) + cpo_alpha: float | None = Field( + default=None, json_schema_extra={"description": "Weight of the BC regularizer"} + ) - kto_desirable_weight: float | None = None - kto_undesirable_weight: float | None = None - rl_beta: float | None = None + kto_desirable_weight: float | None = Field( + default=None, + json_schema_extra={"description": "Factor for desirable loss term in KTO loss"}, + ) + kto_undesirable_weight: float | None = Field( + default=None, + json_schema_extra={ + "description": "Factor for undesirable loss term in KTO loss" + }, + ) + rl_beta: float | None = Field( + default=None, + json_schema_extra={"description": "The beta parameter for the RL training"}, + ) - max_memory: dict[int | Literal["cpu", "disk"], int | str] | None = None - gpu_memory_limit: int | str | None = None - low_cpu_mem_usage: bool | None = None + max_memory: dict[int | Literal["cpu", "disk"], int | str] | None = Field( + default=None, + json_schema_extra={ + "description": "Defines the max memory usage per gpu on the system. Passed through to transformers when loading the model." + }, + ) + gpu_memory_limit: int | str | None = Field( + default=None, + json_schema_extra={ + "description": "Limit the memory for all available GPUs to this amount (if an integer, expressed in gigabytes); default: unset" + }, + ) + low_cpu_mem_usage: bool | None = Field( + default=None, + json_schema_extra={"description": "Whether to use low_cpu_mem_usage"}, + ) chat_template: ( ChatTemplate | Annotated[str, StringConstraints(pattern="^tokenizer_default_fallback_")] - ) | None = None - chat_template_jinja: str | None = None - chat_template_kwargs: dict[str, Any] | None = None - eot_tokens: list[str] | None = None - default_system_message: str | None = None + ) | None = Field( + default=None, + json_schema_extra={ + "description": "The name of the chat template to use for training, following values are supported: tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default value. alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py. tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not available in the tokenizer. jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field. The selected chat template will be saved to the tokenizer_config.json for easier inferencing" + }, + ) + chat_template_jinja: str | None = Field( + default=None, + json_schema_extra={ + "description": "Custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null." + }, + ) + eot_tokens: list[str] | None = Field( + default=None, + json_schema_extra={ + "description": "Custom EOT (End-of-Turn) tokens to mask/unmask during training. These tokens mark the boundaries between conversation turns. 
For example: ['/INST', '', '[/SYSTEM_PROMPT]']. If not specified, defaults to just the model's eos_token. This is useful for templates that use multiple delimiter tokens."
+        },
+    )
+    default_system_message: str | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Changes the default system message. Currently only supports chatml."
+        },
+    )

     fix_untrained_tokens: int | list[int] | None = None

@@ -326,49 +778,50 @@ class AxolotlInputConfig(
     is_preprocess: bool | None = None
     preprocess_iterable: bool | None = None

-    total_num_tokens: int | None = None
+    total_num_tokens: int | None = Field(
+        default=None,
+        json_schema_extra={"description": "Total number of tokens - internal use"},
+    )
     total_supervised_tokens: int | None = None
-    sample_packing_eff_est: float | None = None
+    sample_packing_eff_est: float | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "You can set these packing optimizations AFTER starting a training run at least once. The trainer will provide recommended values."
+        },
+    )
     axolotl_config_path: str | None = None

-    is_falcon_derived_model: bool | None = Field(default=None)
-    is_llama_derived_model: bool | None = Field(default=None)
-    is_mistral_derived_model: bool | None = Field(default=None)
-    is_qwen_derived_model: bool | None = Field(default=None)
+    is_falcon_derived_model: bool | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Internal use only - Used to identify what the model is based on"
+        },
+    )
+    is_llama_derived_model: bool | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Internal use only - Used to identify what the model is based on"
+        },
+    )
+    is_mistral_derived_model: bool | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Internal use only - Used to identify what the model is based on. Please note that if you set this to true, `padding_side` will be set to 'left' by default"
+        },
+    )
+    is_qwen_derived_model: bool | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Internal use only - Used to identify what the model is based on"
+        },
+    )

-    plugins: list[str] | None = Field(default=None)
-
-    @field_validator("seed", mode="after")
-    @classmethod
-    def set_default_seed(cls, seed):
-        if seed is None:
-            LOG.info("`seed` not set in config; setting to 42")
-            seed = 42
-        return seed
-
-    @field_validator("datasets", mode="before")
-    @classmethod
-    def deprecate_sharegpt_datasets(cls, datasets):
-        for _, ds_cfg in enumerate(datasets):
-            # Handle both dict and pydantic model cases
-            ds_type = (
-                ds_cfg.get("type")
-                if isinstance(ds_cfg, dict)
-                else getattr(ds_cfg, "type", None)
-            )
-            if not ds_type:
-                continue
-
-            # skip if it's a dict (for custom user instruction prompt)
-            if isinstance(ds_type, dict):
-                continue
-
-            if isinstance(ds_type, str) and ds_type.startswith("sharegpt"):
-                raise ValueError(
-                    "`type: sharegpt.*` is deprecated. Please use `type: chat_template` instead."
-                )
-
-        return datasets
+    plugins: list[str] | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Add plugins to extend the pipeline. See `src/axolotl/integrations` for the available plugins or doc below for more details. 
https://docs.axolotl.ai/docs/custom_integrations.html" + }, + ) @field_serializer("datasets") def datasets_serializer( @@ -378,960 +831,9 @@ class AxolotlInputConfig( return [ds_config.model_dump(exclude_none=True) for ds_config in ds_configs] return None - @model_validator(mode="before") - @classmethod - def check_attention_fields(cls, data): - fields = ( - "xformers_attention", - "sdp_attention", - "s2_attention", - "flash_attention", - "flex_attention", - ) - non_empty_count = sum(1 for field in fields if data.get(field)) - - if non_empty_count > 1: - raise ValueError(f"Only one of {', '.join(fields)} must be set") - return data - - @model_validator(mode="before") - @classmethod - def check_batch_size_fields(cls, data): - fields = ("micro_batch_size", "gradient_accumulation_steps", "batch_size") - non_empty_count = sum(1 for field in fields if data.get(field)) - - if non_empty_count < 2: - raise ValueError(f"At least two of {', '.join(fields)} must be set") - return data - - @model_validator(mode="before") - @classmethod - def check_pretraining_w_max_steps(cls, data): - if data.get("pretraining_dataset") and not data.get("max_steps"): - raise ValueError( - "max_steps must be set when using iterable pretraining_dataset, Trainer can't infer length and schedule optimizer/learning rate without it!" - ) - return data - - @model_validator(mode="before") - @classmethod - def check_pretraining_w_group_by_length(cls, data): - if data.get("pretraining_dataset") and data.get("group_by_length"): - LOG.warning( - "You probably want to disable group_by_length as it will force a streamed dataset to download completely." - ) - return data - - @model_validator(mode="before") - @classmethod - def check_pretraining_split_batches_accelerate(cls, data): - # alternatively set ACCELERATE_SPLIT_BATCHES=False - if data.get("pretraining_dataset"): - accelerator_config = data.get("accelerator_config", {}) - if not accelerator_config: - data["accelerator_config"] = { - "split_batches": False, - "dispatch_batches": False, - } - else: - if accelerator_config.get("split_batches") is None: - data["accelerator_config"]["split_batches"] = False - if accelerator_config.get("dispatch_batches") is None: - data["accelerator_config"]["dispatch_batches"] = False - return data - - @model_validator(mode="before") - @classmethod - def check_gptq_w_revision(cls, data): - if data.get("gptq") and data.get("revision_of_model"): - raise ValueError( - "revision_of_model is not supported for GPTQ models. " - + "Please download the model from HuggingFace Hub manually for correct branch, " - + "point to its path, and remove revision_of_model from the config." 
- ) - return data - - @model_validator(mode="before") - @classmethod - # pylint: disable=duplicate-code - def check_chat_template_config(cls, data): - # if chat_template is set to jinja, chat_template_jinja is required - if data.get("chat_template") == ChatTemplate.jinja and not data.get( - "chat_template_jinja" - ): - raise ValueError( - "chat_template_jinja is required when chat_template is set to jinja" - ) - - # If chat_template_jinja is set, set chat_template to jinja - if data.get("chat_template_jinja") and not data.get("chat_template"): - data["chat_template"] = ChatTemplate.jinja - - return data - - @model_validator(mode="before") - @classmethod - def check_sample_packing_wo_flash(cls, data): - if ( - data.get("sample_packing") - and not data.get("flash_attention") - and not data.get("sdp_attention") - and not data.get("flex_attention") - and not data.get("xformers_attention") - ): - LOG.warning( - "sample_packing without flash, sdp, xformers or flex attention does not handle cross sample decontamination." - ) - - return data - - @model_validator(mode="before") - @classmethod - def check_sample_packing_with_s2attn(cls, data): - if data.get("sample_packing") and data.get("s2_attention"): - raise ValueError( - "Received `sample_packing=true` and `s2_attention=true`; however, \ - shifted-sparse attention does not currently support sample packing." - ) - return data - - @model_validator(mode="before") - @classmethod - def check_batch_flattening_fa(cls, data): - if data.get("batch_flattening"): - batch_flattening_auto = data.get("batch_flattening") == "auto" - if not data.get("flash_attention") and not batch_flattening_auto: - raise ValueError("batch_flattening requires flash attention") - if data.get("sample_packing") and not batch_flattening_auto: - raise ValueError("batch_flattening not compatible with sample_packing") - if data.get("micro_batch_size") == 1 and not batch_flattening_auto: - LOG.warning("batch_flattening has no effect with micro_batch_size == 1") - - if ( - batch_flattening_auto - and data.get("flash_attention") - and not data.get("sample_packing") - and data.get("micro_batch_size") > 1 - ): - data["batch_flattening"] = True - elif batch_flattening_auto: - data["batch_flattening"] = False - - return data - - @model_validator(mode="before") - @classmethod - def check_sample_packing_w_rl(cls, data): - if data.get("sample_packing") and data.get("rl"): - raise ValueError("`sample_packing: true` does not work with RLHF training") - return data - - @model_validator(mode="before") - @classmethod - def hint_sample_packing_padding(cls, data): - if data.get("sample_packing"): - pad_to_sequence_len = data.get("pad_to_sequence_len") - if pad_to_sequence_len is False: - LOG.warning( - "`pad_to_sequence_len: true` is recommended when using sample_packing" - ) - elif pad_to_sequence_len is None: - LOG.info( - "Setting `pad_to_sequence_len: true` to prevent memory leaks when sample_packing" - ) - data["pad_to_sequence_len"] = True - return data - - @model_validator(mode="before") - @classmethod - def hint_reward_model_pad(cls, data): - if data.get("reward_model") and not data.get("pad_to_sequence_len"): - LOG.warning( - "`pad_to_sequence_len: true` is recommended when using reward_model" - ) - if data.get("pad_to_sequence_len") is None: - data["pad_to_sequence_len"] = True - return data - - @model_validator(mode="before") - @classmethod - def check_gas_bsz(cls, data): - if data.get("gradient_accumulation_steps") and data.get("batch_size"): - raise ValueError( - "please set only one of 
gradient_accumulation_steps or batch_size" - ) - return data - - @model_validator(mode="before") - @classmethod - def hint_eval_train_mbsz(cls, data): - if ( - data.get("eval_batch_size") - and data.get("micro_batch_size") - and data.get("eval_batch_size") != data.get("micro_batch_size") - ): - LOG.warning( - "eval_batch_size != micro_batch_size. This can lead to VRAM instability." - ) - return data - - @model_validator(mode="before") - @classmethod - def check_push_ds_auth(cls, data): - if ( - data.get("push_dataset_to_hub") - and data.get("hf_use_auth_token") is not True - ): - raise ValueError( - "Require cfg.hf_use_auth_token to be True for push_dataset_to_hub" - ) - return data - - @model_validator(mode="after") - def check_falcon_fsdp(self): - if (self.base_model and "falcon" in self.base_model.lower()) and self.fsdp: - raise ValueError("FSDP is not supported for falcon models") - return self - - @model_validator(mode="after") - def check_mpt_checkpointing(self): - if ( - self.base_model and "mpt" in self.base_model.lower() - ) and self.gradient_checkpointing: - raise ValueError("gradient_checkpointing is not supported for MPT models") - return self - - @model_validator(mode="after") - def check_offload_grad_checkpointing(self): - if self.gradient_checkpointing and self.gradient_checkpointing == "unsloth": - LOG.warning( - "`unsloth` is deprecated for gradient_checkpointing, use `offload`" - ) - self.gradient_checkpointing = "offload" - return self - - @model_validator(mode="after") - def check_better_transformers(self): - if self.flash_optimum is True: - if self.adapter: - LOG.warning( - "BetterTransformers probably doesn't work with PEFT adapters" - ) - if self.fp16 or self.bf16: - raise ValueError("AMP is not supported with BetterTransformer") - if self.float16 is not True and self.bfloat16 is not True: - LOG.warning( - "You should probably set bfloat16 or float16 to true to " - "load the model in float16 for BetterTransformers" - ) - return self - - @model_validator(mode="after") - def check_adamw_optimizer_params(self): - if any([self.adam_beta1, self.adam_beta2, self.adam_epsilon]) and ( - not self.optimizer or "adamw" not in str(self.optimizer).lower() - ): - LOG.warning("adamw hyperparameters found, but no adamw optimizer set") - return self - - @model_validator(mode="before") - @classmethod - def check_lr_groups(cls, data): - if data.get("lr_groups") and data.get("loraplus_lr_ratio"): - raise ValueError("lr_groups and loraplus_lr_ratio cannot be used together.") - return data - - @model_validator(mode="before") - @classmethod - def check_saves(cls, data): - if ( - data.get("save_strategy") - and data.get("save_steps") - and data.get("save_strategy") != "steps" - ): - raise ValueError( - "save_strategy and save_steps mismatch. Please set save_strategy to 'steps' or remove save_steps." - ) - if data.get("saves_per_epoch") and data.get("save_steps"): - raise ValueError( - "save_steps and saves_per_epoch are mutually exclusive and cannot be used together." - ) - return data - - @model_validator(mode="before") - @classmethod - def check_push_save(cls, data): - if data.get("hub_model_id") and ( - data.get("save_strategy") not in ["steps", "epoch", None] - ): - LOG.warning( - "hub_model_id is set without any models being saved. To save a model, set save_strategy." 
- ) - return data - - @model_validator(mode="before") - @classmethod - def check_evals(cls, data): - if ( - data.get("eval_strategy") - and data.get("eval_steps") - and data.get("eval_strategy") != "steps" - ): - raise ValueError( - "eval_strategy and eval_steps mismatch. Please set eval_strategy to 'steps' or remove eval_steps." - ) - - if ( - data.get("val_set_size") == 0 - and (data.get("eval_steps") or data.get("eval_strategy")) - and not data.get("test_datasets") - and data.get("eval_strategy") != "no" - ): - raise ValueError( - "eval_steps and eval_strategy are not supported with val_set_size == 0" - ) - if data.get("evals_per_epoch") and data.get("eval_steps"): - raise ValueError( - "eval_steps and evals_per_epoch are mutually exclusive and cannot be used together." - ) - if ( - data.get("evals_per_epoch") - and data.get("eval_strategy") - and data.get("eval_strategy") != "steps" - ): - raise ValueError( - "eval_strategy must be empty or set to `steps` when used with evals_per_epoch." - ) - - if data.get("do_bench_eval") and not ( - data.get("evals_per_epoch") or data.get("eval_steps") - ): - raise ValueError( - "do_bench_eval requires evals_per_epoch or eval_steps to be set." - ) - return data - - @model_validator(mode="before") - @classmethod - def check_test_datasets_bench(cls, data): - if ( - data.get("do_bench_eval") - and not data.get("test_datasets") - and not data.get("val_set_size") - ): - LOG.warning( - "`do_bench_eval` needs a test dataset to run evals, adding an empty test_dataset." - ) - data["test_datasets"] = [{"path": "axolotl-ai-co/empty-test-ds"}] - return data - - @model_validator(mode="before") - @classmethod - def check_eval_packing(cls, data): - # TODO also should check test_datasets and val_set_size as we can skip - # if there are no eval datasets/splits - if ( - data.get("sample_packing") - and data.get("eval_table_size") - and data.get("eval_sample_packing") is not False - ): - raise ValueError( - "eval_table_size and eval_sample_packing are not supported together with sample_packing. Please set 'eval_sample_packing' to false." 
- ) - if ( - data.get("sample_packing") - and data.get("eval_sample_packing") is None - and not data.get("eval_table_size") - ): - LOG.info( - "explicitly setting `eval_sample_packing` to match `sample_packing`" - ) - data["eval_sample_packing"] = True - - if ( - data.get("sample_packing") - and data.get("eval_sample_packing") is False - and data.get("remove_unused_columns") is None - ): - LOG.info( - "setting `remove_unused_columns: false` for when sample_packing and eval_sample_packing don't match" - ) - data["remove_unused_columns"] = False - - return data - - @model_validator(mode="before") - @classmethod - def check_mm_prepare(cls, data): - if data.get("skip_prepare_dataset"): - if data.get("remove_unused_columns") is None: - LOG.info( - "setting `remove_unused_columns: false` for skip_prepare_dataset" - ) - data["remove_unused_columns"] = False - - return data - - @model_validator(mode="before") - @classmethod - def check_warmup(cls, data): - if data.get("warmup_steps") and data.get("warmup_ratio"): - raise ValueError("warmup_steps and warmup_ratio are mutually exclusive") - return data - - @model_validator(mode="before") - @classmethod - def check_neftune(cls, data): - if data.get("noisy_embedding_alpha") and not data.get("neftune_noise_alpha"): - data["neftune_noise_alpha"] = data["noisy_embedding_alpha"] - del data["noisy_embedding_alpha"] - elif data.get("noisy_embedding_alpha") and not data.get("neftune_noise_alpha"): - raise ValueError( - "noisy_embedding_alpha is deprecated, use neftune_noise_alpha; both are set, please remove the deprecated noisy_embedding_alpha setting" - ) - return data - - @field_validator("neftune_noise_alpha") - @classmethod - def validate_neftune_noise_alpha(cls, neftune_noise_alpha): - if neftune_noise_alpha is not None and neftune_noise_alpha <= 0.0: - raise ValueError("neftune_noise_alpha must be > 0.0") - return neftune_noise_alpha - - @model_validator(mode="after") - def check_rl_beta(self): - if self.dpo_beta and not self.rl_beta: - self.rl_beta = self.dpo_beta - del self.dpo_beta - return self - - @model_validator(mode="after") - def check_simpo_warmup(self): - if self.rl is RLType.SIMPO and self.warmup_ratio: - raise ValueError( - "warmup_ratio is not supported with the simpo trainer. Please use `warmup_steps` instead" - ) - return self - - @model_validator(mode="before") - @classmethod - def check_frozen(cls, data): - if ( - data.get("adapter") - and data.get("peft_layers_to_transform") - and data.get("unfrozen_parameters") - ): - raise ValueError( - "`unfrozen_parameters` used with `peft_layers_to_transform` can have unexpected behavior." - ) - - return data - - @model_validator(mode="before") - @classmethod - def check_peft_layers_pattern(cls, data): - if data.get("peft_layers_pattern") and not data.get("peft_layers_to_transform"): - raise ValueError( - "peft_layers_pattern requires peft_layers_to_transform to be set" - ) - return data - - @model_validator(mode="after") - def check_fft_possible_bad_config(self): - if ( - # pylint: disable=too-many-boolean-expressions - not (self.bf16 or self.bfloat16) - and (self.fp16 or self.float16) - and not self.adapter - and not self.flash_attention - and self.sample_packing - ): - LOG.warning( - "Full fine tune w/o FA2 w/ sample packing and fp16/float16 is likely to raise errors. Try LoRA." - ) - # ValueError: Attempting to unscale FP16 gradients. 
- # OR - # RuntimeError: expected mat1 and mat2 to have the same dtype, but got: float != c10::Half - return self - - @model_validator(mode="after") - def check_fused_lora(self): - if self.adapter in ["lora", "qlora"] and ( - self.flash_attn_fuse_qkv or self.flash_attn_fuse_mlp - ): - raise ValueError("Fused modules are not supported with LoRA/QLoRA") - return self - - @model_validator(mode="after") - def hint_lora_8bit(self): - loftq = ( - self.peft and self.peft.loftq_config and self.peft.loftq_config.loftq_bits - ) - if not self.load_in_8bit and self.adapter == "lora" and not loftq: - LOG.warning("We recommend setting `load_in_8bit: true` for LORA finetuning") - return self - - @model_validator(mode="after") - def check_early_stopping(self): - if self.early_stopping_patience: - if not self.save_steps or not self.eval_steps: - raise ValueError( - "`early_stopping_patience` requires save_steps and eval_steps to be set. eval_steps should evenly divide save_steps." - ) - if self.save_steps % self.eval_steps != 0: - raise ValueError( - "`early_stopping_patience` requires that eval_steps should evenly divide save_steps." - ) - return self - - @model_validator(mode="after") - def check_relora(self): - if self.relora_steps: - if self.adapter not in ("lora", "qlora"): - raise ValueError("cfg.adapter must be lora or qlora to use ReLoRA") - - if self.fsdp: - raise ValueError("fsdp not supported with ReLoRA") - - if self.deepspeed: - raise ValueError("deepspeed not supported with ReLoRA") - - if self.lr_scheduler == "one_cycle": - raise ValueError( - "ReLoRA is not compatible with the one_cycle scheduler" - ) - - if self.flash_attn_fuse_qkv or self.flash_attn_fuse_mlp: - raise ValueError("Fused modules are not supported with ReLoRA") - return self - - @model_validator(mode="before") - @classmethod - def check_mem_mismatch(cls, data): - if ( - data.get("max_memory") is not None - and data.get("gpu_memory_limit") is not None - ): - raise ValueError( - "max_memory and gpu_memory_limit are mutually exclusive and cannot be used together." - ) - return data - - @model_validator(mode="before") - @classmethod - def check_use_reentrant_mismatch(cls, data): - if ( - data.get("unfrozen_parameters") - and data.get("gradient_checkpointing_kwargs") - and data.get("gradient_checkpointing_kwargs", {}).get("use_reentrant") - is True - ): - # https://github.com/huggingface/transformers/issues/21381 - raise ValueError( - "`use_reentrant` must be false when used with partially frozen model." - ) - return data - - @model_validator(mode="before") - @classmethod - def warn_qlora_zero3_w_use_reentrant(cls, data): - if ( - data.get("adapter") == "qlora" - and data.get("gradient_checkpointing_kwargs", {}) - and data.get("gradient_checkpointing_kwargs", {}).get("use_reentrant") - is False - and data.get("deepspeed", "") is not None - and "zero3" in data.get("deepspeed", "") - ): - # may result in: - # torch.utils.checkpoint.CheckpointError: torch.utils.checkpoint: - # Recomputed values for the following tensors have different metadata - # than during the forward pass. 
- LOG.warning( - "qlora + zero3 with use_reentrant: false may result in a CheckpointError about recomputed values" - ) - return data - - @model_validator(mode="before") - @classmethod - def check_val_w_test_datasets(cls, data): - if data.get("test_datasets") and data.get("val_set_size"): - raise ValueError( - "non-zero val_set_size should not be used with test_datasets configuration" - ) - return data - - @model_validator(mode="before") - @classmethod - def check_eval_strategy(cls, data): - if ( - data.get("evaluation_strategy") is not None - and data.get("eval_strategy") is None - ): - LOG.info( - "explicitly setting `eval_strategy` from the `evaluation_strategy`" - ) - data["eval_strategy"] = data.get("evaluation_strategy") - return data - - @model_validator(mode="before") - @classmethod - def check_fsdp_offload_w_8bit_optimizer(cls, data): - if ( - data.get("fsdp") - and "8bit" in data.get("optimizer", "") - and data.get("fsdp_config") - and data["fsdp_config"].get("fsdp_offload_params") - and str(data["fsdp_config"].get("fsdp_version")) != "2" - ): - raise ValueError( - f"FSDP Offload not compatible with {data.get('optimizer')}" - ) - if ( - data.get("fsdp") - and "8bit" in data.get("optimizer", "") - and data.get("fsdp_config") - and str(data["fsdp_config"].get("fsdp_version")) == "2" - ): - if data.get("optimizer", "") in ["adamw_8bit", "adamw_bnb_8bit"]: - # CUDA ops errors with bnb 8bit optimizer + FSDP2 - raise ValueError( - f"FSDP2 not compatible with {data.get('optimizer')}, use `adamw_torch_8bit` instead" - ) - - return data - - @model_validator(mode="before") - @classmethod - def check_fsdp_sharded_state_dict_w_safetensors(cls, data): - if ( - data.get("fsdp") - and data.get("save_safetensors") - and data.get("fsdp_config") - and data["fsdp_config"].get("fsdp_state_dict_type") == "SHARDED_STATE_DICT" - ): - raise ValueError( - "FSDP SHARDED_STATE_DICT not compatible with save_safetensors" - ) - return data - - @model_validator(mode="before") - @classmethod - def check_causal_lm_evals(cls, data): - if data.get("do_causal_lm_eval") and data.get("eval_sample_packing"): - raise ValueError( - "do_causal_lm_eval is enabled, eval_sample_packing must be set to False" - ) - - if data.get("eval_causal_lm_metrics"): - if not isinstance(data.get("eval_causal_lm_metrics"), list): - raise ValueError("eval_causal_lm_metrics must be a list") - # only ["sacrebleu", "comet", "ter", "chrf"] supported - if set(data.get("eval_causal_lm_metrics")) - SUPPORTED_METRICS: - raise ValueError( - f"eval_causal_lm_metrics must be one of {SUPPORTED_METRICS}" - ) - return data - - @model_validator(mode="before") - @classmethod - def check_dataset_or_pretraining_dataset(cls, data): - if data.get("datasets") is None and data.get("pretraining_dataset") is None: - raise ValueError("either datasets or pretraining_dataset is required") - return data - - @model_validator(mode="before") - @classmethod - def check_xentropy_patch_conflicts(cls, data): - if data.get("flash_attn_cross_entropy") and data.get( - "unsloth_cross_entropy_loss" - ): - raise ValueError( - "flash_attn_cross_entropy and unsloth_cross_entropy_loss cannot be both enabled" - ) - return data - - @model_validator(mode="before") - @classmethod - def check_qlora_unsloth(cls, data): - if ( - data.get("unsloth_lora_mlp") - or data.get("unsloth_lora_qkv") - or data.get("unsloth_lora_o") - ): - if data.get("adapter") == "lora" and data.get("load_in_8bit"): - raise ValueError( - "unsloth_lora_mlp, unsloth_lora_qkv, and unsloth_lora_o are not compatible with 
8-bit LoRA" - ) - return data - - @model_validator(mode="before") - @classmethod - def check_lora_kernel_8bit(cls, data): - if ( - data.get("lora_mlp_kernel") - or data.get("lora_qkv_kernel") - or data.get("lora_o_kernel") - ): - if data.get("adapter") == "lora" and data.get("load_in_8bit"): - raise ValueError( - "lora_mlp_kernel, lora_qkv_kernel, and lora_o_kernel are not compatible with 8-bit LoRA" - ) - return data - - @model_validator(mode="before") - @classmethod - def check_lora_kernel_rl(cls, data): - if ( - data.get("lora_mlp_kernel") - or data.get("lora_qkv_kernel") - or data.get("lora_o_kernel") - ) and data.get("rl"): - raise ValueError( - "lora_mlp_kernel, lora_qkv_kernel, and lora_o_kernel are not compatible with RL at the moment." - ) - return data - - @model_validator(mode="before") - @classmethod - def check_lora_axolotl_unsloth(cls, data): - is_lora_kernel = any( - data.get(k) for k in ["lora_mlp_kernel", "lora_qkv_kernel", "lora_o_kernel"] - ) - is_unsloth_lora = any( - data.get(k) - for k in ["unsloth_lora_mlp", "unsloth_lora_qkv", "unsloth_lora_o"] - ) - if is_lora_kernel and is_unsloth_lora: - raise ValueError( - "both lora_mlp_kernel and unsloth_lora_mlp cannot be true (similarly for lora_qkv_kernel, lora_o_kernel)" - ) - return data - - @model_validator(mode="before") - @classmethod - def check_torch_compile_deepspeed(cls, data): - if data.get("deepspeed") and data.get("torch_compile"): - raise ValueError( - "torch_compile should be set within your deepspeed config file" - ) - return data - - @model_validator(mode="before") - @classmethod - def check_npu_config(cls, data): - if is_torch_npu_available(): - # check attention config - attn_list = ["flash_attention", "sdp_attention", "s2_attention"] - for attn in attn_list: - if data.get(attn): - raise NotImplementedError( - f"{attn} is currently not supported in Ascend npu, please disable this configuration." - ) - - # check quant config - if data.get("optimizer") is not None and "bit" in data.get("optimizer"): - optimizer = data.get("optimizer") - raise NotImplementedError( - f"{optimizer} is currently not supported in Ascend npu, choose another one please." - ) - - quant_list = ["load_in_8bit", "load_in_4bit"] - for quant in quant_list: - if data.get(quant): - raise NotImplementedError( - f"Quantification is currently not supported in Ascend npu, please disable {quant}." - ) - - # check dtype config - if data.get("tf32"): - raise NotImplementedError( - "tf32 dtype is currently not supported in Ascend npu, please disable this configuration" - ) - - return data - - @model_validator(mode="before") - @classmethod - def check_rl_config_gradient_checkpointing(cls, data): - # TODO: SalmanMohammadi - # Distributed RL with QLoRA + gradient checkpointing - # and use_reentrant = True is broken upstream in TRL - # pylint: disable=too-many-boolean-expressions - if ( - data.get("rl") - and data.get("gradient_checkpointing") - and data.get("gradient_checkpointing_kwargs") - and data.get("gradient_checkpointing_kwargs").get("use_reentrant") - and data.get("load_in_4bit") - and data.get("adapter") == "qlora" - and data.get("capabilities") - and data.get("capabilities").get("n_gpu", 1) > 1 - ): - raise ValueError( - "The `use_reentrant: True` implementation of gradient checkpointing " - "is not supported for distributed RL training with QLoRA. Please set " - "`use_reentrant: False` in `gradient_checkpointing_kwargs`." 
- ) - return data - - @model_validator(mode="before") - @classmethod - def check_kto_config(cls, data): - if data.get("rl") == "kto": - if data.get("sample_packing") or data.get("eval_sample_packing"): - raise ValueError("sample_packing is not supported with kto") - - if data.get("remove_unused_columns") is not False: - raise ValueError("Set `remove_unused_columns: False` when using kto") - - return data - - @model_validator(mode="before") - @classmethod - def check_grpo_liger_sequence_parallel(cls, data): - if ( - data.get("rl") == "grpo" - and data.get("trl", {}) - and data.get("trl").get("use_liger_loss") - and data.get("sequence_parallel_degree", 1) > 1 - ): - raise ValueError("GRPO + SP + Liger not currently supported") - return data - - @model_validator(mode="after") - def check_sequence_parallel_degree(self): - if not self.sequence_parallel_degree: - self.sequence_parallel_degree = 1 - elif self.sequence_parallel_degree > 1: - if not self.flash_attention: - raise ValueError( - "flash_attention: true must be set with sequence_parallel_degree > 1" - ) - - if self.sample_packing and getattr(self, "micro_batch_size", 1) > 1: - raise ValueError( - "micro_batch_size must be set to 1 when sample_packing is enabled " - "due to a `ring-flash-attn` requirement" - ) - - try: - import ring_flash_attn # noqa: F401 # pylint:disable=unused-import - except ImportError as exception: - raise ImportError( - "sequence_parallel_degree > 1 but ring_flash_attn is not installed. " - "Please install it with `pip install axolotl[ring-flash-attn] " - "or `pip install ring-flash-attn>=0.1.4`." - ) from exception - - # TODO: monkeypatch / callback to average losses correctly across SP ranks - # / fix gradient scaling across SP ranks. Losses, grads should be scaled - # according to the proportion of non-padding tokens per rank. - LOG.warning( - "Sequence parallelism (SP) is enabled with " - f"sequence_parallel_degree={self.sequence_parallel_degree}. " - "Please note that logged losses may differ slightly to the non-SP " - "losses due to transformers Trainer implementation details. " - "Please see https://github.com/axolotl-ai-cloud/axolotl/pull/2495#issuecomment-2784022042 " - "for more details." - ) - - return self - - @model_validator(mode="after") - def validate_ring_attn_func(self): - if getattr(self, "sequence_parallel_degree", 1) == 1: - return self - - if self.ring_attn_func is not None: - self.ring_attn_func = RingAttnFunc(self.ring_attn_func) - else: - # Default ring attention function selection - sample_packing = getattr(self, "sample_packing", False) - self.ring_attn_func = ( - RingAttnFunc.VARLEN_LLAMA3 - if sample_packing - else RingAttnFunc.BATCH_RING - ) - - return self - - @model_validator(mode="before") - @classmethod - def check_muon_deepspeed_fsdp(cls, data): - if data.get("optimizer") == "muon" and ( - data.get("deepspeed") or data.get("fsdp") or data.get("fsdp_config") - ): - raise ValueError( - "Muon optimizer is currently incompatible with DeepSpeed and FSDP" - ) - return data - - @model_validator(mode="before") - @classmethod - def check_tokenizer_use_mistral_common(cls, data): - if data.get("tokenizer_use_mistral_common") is None: - if any( - "magistral" in name.lower() - for name in [ - data.get("base_model", ""), - data.get("base_model_config", ""), - data.get("tokenizer_config", ""), - ] - ): - LOG.warning( - "tokenizer_use_mistral_common auto inferred to True for Magistral models. Please set it to True explicitly if you want to use mistral-common tokenizer." 
- ) - data["tokenizer_use_mistral_common"] = True - - return data - - @field_validator("tokenizer_use_mistral_common", mode="after") - @classmethod - def check_mistral_common_import(cls, tokenizer_use_mistral_common): - if tokenizer_use_mistral_common: - try: - import mistral_common # noqa: F401 # pylint:disable=unused-import - except ImportError as exception: - raise ImportError( - "mistral-common is required for mistral models. Please install it with `pip install axolotl` or `pip install -e .`." - ) from exception - - return tokenizer_use_mistral_common - - @model_validator(mode="before") - @classmethod - def check_mistral_common_incompatible_options(cls, data): - if not data.get("tokenizer_use_mistral_common"): - return data - - # NOTE: mistral-common tokenizer is not compatible with editing tokenizer at the moment - - if data.get("added_tokens_overrides"): - raise ValueError( - "added_tokens_overrides is not supported with mistral-common tokenizer" - ) - - if data.get("special_tokens"): - raise ValueError( - "special_tokens override is not supported with mistral-common tokenizer" - ) - - if data.get("tokens"): - raise ValueError( - "tokens override is not supported with mistral-common tokenizer" - ) - - if data.get("chat_template"): - raise ValueError( - "Setting chat_template is not supported with mistral-common tokenizer" - ) - - return data - class AxolotlConfigWCapabilities(AxolotlInputConfig): - """wrapper to valdiate gpu capabilities with the configured options""" + """wrapper to validate GPU capabilities with the configured options""" capabilities: GPUCapabilities env_capabilities: EnvCapabilities @@ -1375,13 +877,7 @@ class AxolotlConfigWCapabilities(AxolotlInputConfig): return data - @model_validator(mode="before") - @classmethod - def check_fsdp_deepspeed(cls, data): - if data.get("deepspeed") and data.get("fsdp"): - raise ValueError("deepspeed and fsdp cannot be used together.") - return data - + # pylint: disable=duplicate-code @model_validator(mode="before") @classmethod def check_multigpu_unsloth(cls, data): @@ -1397,6 +893,7 @@ class AxolotlConfigWCapabilities(AxolotlInputConfig): ) return data + # pylint: disable=duplicate-code @model_validator(mode="before") @classmethod def check_multigpu_lora_kernels(cls, data): diff --git a/src/axolotl/utils/schemas/datasets.py b/src/axolotl/utils/schemas/datasets.py index c71f9be77..d9459feb9 100644 --- a/src/axolotl/utils/schemas/datasets.py +++ b/src/axolotl/utils/schemas/datasets.py @@ -1,6 +1,8 @@ """Pydantic models for datasets-related configuration""" -from pydantic import BaseModel, model_validator +from typing import Literal + +from pydantic import BaseModel, Field, model_validator from axolotl.utils.schemas.enums import ChatTemplate from axolotl.utils.schemas.utils import handle_legacy_message_fields_logic @@ -9,57 +11,178 @@ from axolotl.utils.schemas.utils import handle_legacy_message_fields_logic class UserDefinedPrompterType(BaseModel): """Structure for user defined prompt types""" - system_prompt: str | None = None - system_format: str | None = None + system_prompt: str | None = Field( + default=None, + json_schema_extra={"description": "Custom user instruction prompt"}, + ) + system_format: str | None = Field( + default=None, + json_schema_extra={"description": "Use {system} as key to be replaced"}, + ) field_system: str | None = None field_instruction: str | None = None field_input: str | None = None field_output: str | None = None - format: str | None = None - no_input_format: str | None = None - field: str | None
= None + format: str | None = Field( + default=None, + json_schema_extra={ + "description": "Customizable to be single line or multi-line. Use {instruction}/{input} as key to be replaced. 'format' can include {input}" + }, + ) + no_input_format: str | None = Field( + default=None, + json_schema_extra={"description": "'no_input_format' cannot include {input}"}, + ) + field: str | None = Field( + default=None, + json_schema_extra={ + "description": "For `completion` datasets only, uses the provided field instead of `text` column" + }, + ) class SFTDataset(BaseModel): """SFT configuration subset""" - path: str | None = None - split: str | None = None - type: str | UserDefinedPrompterType | None = None + path: str | None = Field( + default=None, + json_schema_extra={ + "description": "HuggingFace dataset repo | s3:// | gs:// | path to local file or directory" + }, + ) + split: str | None = Field( + default=None, + json_schema_extra={"description": "name of dataset split to load from"}, + ) + type: str | UserDefinedPrompterType | None = Field( + default=None, + json_schema_extra={ + "description": "The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]" + }, + ) input_transform: str | None = None - shards: int | None = None - shards_idx: int | None = None - preprocess_shards: int | None = None + shards: int | None = Field( + default=None, + json_schema_extra={ + "description": "split dataset into N pieces (use with shards_idx)" + }, + ) + shards_idx: int | None = Field( + default=None, + json_schema_extra={"description": "the index of sharded dataset to use"}, + ) + preprocess_shards: int | None = Field( + default=None, + json_schema_extra={ + "description": "process dataset in N sequential chunks for memory efficiency (exclusive with `shards`)" + }, + ) conversation: str | None = None # Do not make this too strict or it will break the validator to choose different dataset class - chat_template: ChatTemplate | str | None = None - chat_template_jinja: str | None = None - data_files: str | list[str] | None = None + chat_template: ChatTemplate | str | None = Field( + default=None, + json_schema_extra={ + "description": "The name of the chat template to use for training, following values are supported: tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default. alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py. tokenizer_default_fallback_*: where * is the name of the chat template to fallback to if the tokenizer does not have a chat template else default to tokenizer. E.g. tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field." + }, + ) + chat_template_jinja: str | None = Field( + default=None, + json_schema_extra={ + "description": "Custom jinja chat template. Used only if `chat_template: jinja` or empty."
+ }, + ) + data_files: str | list[str] | None = Field( + default=None, json_schema_extra={"description": "path to source data files"} + ) input_format: str | None = None - name: str | None = None - ds_type: str | None = None + name: str | None = Field( + default=None, + json_schema_extra={"description": "name of dataset configuration to load"}, + ) + ds_type: str | None = Field( + default=None, + json_schema_extra={"description": "defines the datatype when path is a file"}, + ) field: str | None = None field_human: str | None = None field_model: str | None = None - field_messages: str | None = None - field_tools: str | None = None + field_messages: str | None = Field( + default=None, + json_schema_extra={ + "description": 'Key containing the messages (default: "messages")' + }, + ) + field_tools: str | None = Field( + default=None, + json_schema_extra={ + "description": 'Key containing the tools (default: "tools"). Must be a list[dict] and follow [JSON schema](https://json-schema.org/learn/getting-started-step-by-step).' + }, + ) # deprecated, use message_property_mappings message_field_role: str | None = None # deprecated, use message_property_mappings message_field_content: str | None = None - message_property_mappings: dict[str, str] | None = None - message_field_training: str | None = None - message_field_training_detail: str | None = None - split_thinking: bool | None = None + message_property_mappings: dict[str, str] | None = Field( + default=None, + json_schema_extra={ + "description": "Mapping of properties from the input dataset to the chat template. (default: message_property_mappings={'role':'role', 'content':'content'}) If a property exists in the template but not in this mapping, the system will attempt to load it directly from the message using the property name as the key. Example: In the mapping below, 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and used as 'content' in the chat template." + }, + ) + message_field_training: str | None = Field( + default=None, + json_schema_extra={ + "description": "The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`." + }, + ) + message_field_training_detail: str | None = Field( + default=None, + json_schema_extra={ + "description": "The key in the message turn that contains the training details. Useful to selectively train on certain tokens in a turn. The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train)." + }, + ) + split_thinking: bool | None = Field( + default=None, + json_schema_extra={ + "description": "(for Qwen3 template only) Whether to split the assistant content based on a reasoning trace inside delimited tags" + }, + ) logprobs_field: str | None = None temperature: float | None = None - roles_to_train: list[str] | None = None - train_on_eos: str | None = None - roles: dict[str, list[str]] | None = None - drop_system_message: bool | None = None - trust_remote_code: bool | None = False - revision: str | None = None + roles_to_train: list[str] | None = Field( + default=None, + json_schema_extra={ + "description": "Roles to train on. The tokens from these roles will be considered for the loss." 
+ }, + ) + train_on_eos: Literal["all", "turn", "last"] | None = Field( + default=None, + json_schema_extra={ + "description": "Which EOS tokens to train on in the conversation. Possible values are: all: train on all EOS tokens, turn (default): train on the EOS token at the end of each trainable turn, last: train on the last EOS token in the conversation" + }, + ) + roles: dict[str, list[str]] | None = Field( + default=None, + json_schema_extra={ + "description": 'Roles mapping in the messages. The format is {target_role: [source_roles]}. All source roles will be mapped to the target role. The default is: user: ["human", "user"], assistant: ["gpt", "assistant"], system: ["system"], tool: ["tool"]' + }, + ) + drop_system_message: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Whether to drop the system turn from the dataset. Only works with chat_template. This does not drop the default system message from chat_template if it exists. If you wish to, we recommend using a custom jinja template with the default system message removed or adding a system turn with empty content." + }, + ) + trust_remote_code: bool | None = Field( + default=False, + json_schema_extra={"description": "Trust remote code for untrusted source"}, + ) + revision: str | None = Field( + default=None, + json_schema_extra={ + "description": "The specific revision of the dataset to use when loading from the Hugging Face Hub. This can be a commit hash, tag, or branch name. If not specified, the latest version will be used. This parameter is ignored for local datasets." + }, + ) @model_validator(mode="before") @classmethod diff --git a/src/axolotl/utils/schemas/deprecated.py b/src/axolotl/utils/schemas/deprecated.py index b8904136e..972fe0ccf 100644 --- a/src/axolotl/utils/schemas/deprecated.py +++ b/src/axolotl/utils/schemas/deprecated.py @@ -60,10 +60,30 @@ class RemappedParameters(BaseModel): """Parameters that have been remapped to other names""" overrides_of_model_config: dict[str, Any] | None = Field( - default=None, alias="model_config" + default=None, + alias="model_config", + json_schema_extra={ + "description": "optional overrides to the base model configuration" + }, ) overrides_of_model_kwargs: dict[str, Any] | None = Field( - default=None, alias="model_kwargs" + default=None, + alias="model_kwargs", + json_schema_extra={ + "description": "optional overrides the base model loading from_pretrained" + }, + ) + type_of_model: str | None = Field( + default=None, + alias="model_type", + json_schema_extra={ + "description": "If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too" + }, + ) + revision_of_model: str | None = Field( + default=None, + alias="model_revision", + json_schema_extra={ + "description": "You can specify to choose a specific model revision from huggingface hub" + }, ) - type_of_model: str | None = Field(default=None, alias="model_type") - revision_of_model: str | None = Field(default=None, alias="model_revision") diff --git a/src/axolotl/utils/schemas/enums.py b/src/axolotl/utils/schemas/enums.py index d09ab6387..bfef14d53 100644 --- a/src/axolotl/utils/schemas/enums.py +++ b/src/axolotl/utils/schemas/enums.py @@ -1,5 +1,7 @@ """Enums for Axolotl input config""" +# pylint: disable=invalid-name + from enum import Enum import torch @@ -8,81 +10,81 @@ import torch class TorchIntDType(Enum): """Torch integer data types - `getattr` guards against torch < 2.6 which does not support int4""" - uint1 = getattr(torch, "uint1", None) # pylint: 
disable=invalid-name - uint2 = getattr(torch, "uint2", None) # pylint: disable=invalid-name - uint3 = getattr(torch, "uint3", None) # pylint: disable=invalid-name - uint4 = getattr(torch, "uint4", None) # pylint: disable=invalid-name - uint5 = getattr(torch, "uint5", None) # pylint: disable=invalid-name - uint6 = getattr(torch, "uint6", None) # pylint: disable=invalid-name - uint7 = getattr(torch, "uint7", None) # pylint: disable=invalid-name - int4 = getattr(torch, "int4", None) # pylint: disable=invalid-name - int8 = getattr(torch, "int8", None) # pylint: disable=invalid-name + uint1 = getattr(torch, "uint1", None) + uint2 = getattr(torch, "uint2", None) + uint3 = getattr(torch, "uint3", None) + uint4 = getattr(torch, "uint4", None) + uint5 = getattr(torch, "uint5", None) + uint6 = getattr(torch, "uint6", None) + uint7 = getattr(torch, "uint7", None) + int4 = getattr(torch, "int4", None) + int8 = getattr(torch, "int8", None) class RLType(str, Enum): """RL trainer type configuration subset""" - DPO = "dpo" # pylint: disable=invalid-name - GRPO = "grpo" # pylint: disable=invalid-name - IPO = "ipo" # pylint: disable=invalid-name - ORPO = "orpo" # pylint: disable=invalid-name - KTO = "kto" # pylint: disable=invalid-name - SIMPO = "simpo" # pylint: disable=invalid-name + DPO = "dpo" + GRPO = "grpo" + IPO = "ipo" + ORPO = "orpo" + KTO = "kto" + SIMPO = "simpo" class ChatTemplate(str, Enum): """Chat templates configuration subset""" - alpaca = "alpaca" # pylint: disable=invalid-name - chatml = "chatml" # pylint: disable=invalid-name - mistral_v1 = "mistral_v1" # pylint: disable=invalid-name - mistral_v2v3 = "mistral_v2v3" # pylint: disable=invalid-name - mistral_v3_tekken = "mistral_v3_tekken" # pylint: disable=invalid-name - mistral_v7_tekken = "mistral_v7_tekken" # pylint: disable=invalid-name - gemma = "gemma" # pylint: disable=invalid-name - cohere = "cohere" # pylint: disable=invalid-name - llama3 = "llama3" # pylint: disable=invalid-name - llama3_2_vision = "llama3_2_vision" # pylint: disable=invalid-name - llama4 = "llama4" # pylint: disable=invalid-name - phi_3 = "phi_3" # pylint: disable=invalid-name - phi_35 = "phi_35" # pylint: disable=invalid-name - deepseek_v2 = "deepseek_v2" # pylint: disable=invalid-name - deepseek_v3 = "deepseek_v3" # pylint: disable=invalid-name - jamba = "jamba" # pylint: disable=invalid-name - jinja = "jinja" # pylint: disable=invalid-name - qwen_25 = "qwen_25" # pylint: disable=invalid-name - qwen3 = "qwen3" # pylint: disable=invalid-name - tokenizer_default = "tokenizer_default" # pylint: disable=invalid-name - exaone = "exaone" # pylint: disable=invalid-name - metharme = "metharme" # pylint: disable=invalid-name - pixtral = "pixtral" # pylint: disable=invalid-name - llava = "llava" # pylint: disable=invalid-name - qwen2_vl = "qwen2_vl" # pylint: disable=invalid-name - gemma3 = "gemma3" # pylint: disable=invalid-name - command_a = "command_a" # pylint: disable=invalid-name - command_a_tool_use = "command_a_tool_use" # pylint: disable=invalid-name - command_a_rag = "command_a_rag" # pylint: disable=invalid-name - aya = "aya" # pylint: disable=invalid-name + alpaca = "alpaca" + chatml = "chatml" + mistral_v1 = "mistral_v1" + mistral_v2v3 = "mistral_v2v3" + mistral_v3_tekken = "mistral_v3_tekken" + mistral_v7_tekken = "mistral_v7_tekken" + gemma = "gemma" + cohere = "cohere" + llama3 = "llama3" + llama3_2_vision = "llama3_2_vision" + llama4 = "llama4" + phi_3 = "phi_3" + phi_35 = "phi_35" + deepseek_v2 = "deepseek_v2" + deepseek_v3 = "deepseek_v3" + jamba = 
"jamba" + jinja = "jinja" + qwen_25 = "qwen_25" + qwen3 = "qwen3" + tokenizer_default = "tokenizer_default" + exaone = "exaone" + metharme = "metharme" + pixtral = "pixtral" + llava = "llava" + qwen2_vl = "qwen2_vl" + gemma3 = "gemma3" + command_a = "command_a" + command_a_tool_use = "command_a_tool_use" + command_a_rag = "command_a_rag" + aya = "aya" class CustomSupportedOptimizers(str, Enum): """Custom supported optimizers""" - optimi_adamw = "optimi_adamw" # pylint: disable=invalid-name - ao_adamw_4bit = "ao_adamw_4bit" # pylint: disable=invalid-name - ao_adamw_8bit = "ao_adamw_8bit" # pylint: disable=invalid-name - ao_adamw_fp8 = "ao_adamw_fp8" # pylint: disable=invalid-name - adopt_adamw = "adopt_adamw" # pylint: disable=invalid-name - came_pytorch = "came_pytorch" # pylint: disable=invalid-name - muon = "muon" # pylint: disable=invalid-name + optimi_adamw = "optimi_adamw" + ao_adamw_4bit = "ao_adamw_4bit" + ao_adamw_8bit = "ao_adamw_8bit" + ao_adamw_fp8 = "ao_adamw_fp8" + adopt_adamw = "adopt_adamw" + came_pytorch = "came_pytorch" + muon = "muon" class RingAttnFunc(str, Enum): """Enum class for supported `ring-flash-attn` implementations""" - # VARLEN_RING = "varlen_ring" - # VARLEN_ZIGZAG = "varlen_zigzag" VARLEN_LLAMA3 = "varlen_llama3" BATCH_RING = "batch_ring" + # VARLEN_RING = "varlen_ring" + # VARLEN_ZIGZAG = "varlen_zigzag" # BATCH_ZIGZAG = "batch_zigzag" # BATCH_STRIPE = "batch_stripe" diff --git a/src/axolotl/utils/schemas/integrations.py b/src/axolotl/utils/schemas/integrations.py index 4843e3592..7332c7d39 100644 --- a/src/axolotl/utils/schemas/integrations.py +++ b/src/axolotl/utils/schemas/integrations.py @@ -13,10 +13,21 @@ class MLFlowConfig(BaseModel): """MLFlow configuration subset""" use_mlflow: bool | None = None - mlflow_tracking_uri: str | None = None - mlflow_experiment_name: str | None = None - mlflow_run_name: str | None = None - hf_mlflow_log_artifacts: bool | None = None + mlflow_tracking_uri: str | None = Field( + default=None, json_schema_extra={"description": "URI to mlflow"} + ) + mlflow_experiment_name: str | None = Field( + default=None, json_schema_extra={"description": "Your experiment name"} + ) + mlflow_run_name: str | None = Field( + default=None, json_schema_extra={"description": "Your run name"} + ) + hf_mlflow_log_artifacts: bool | None = Field( + default=None, + json_schema_extra={ + "description": "set to true to copy each saved checkpoint on each save to mlflow artifact registry" + }, + ) class LISAConfig(BaseModel): @@ -40,13 +51,33 @@ class WandbConfig(BaseModel): """Wandb configuration subset""" use_wandb: bool | None = None - wandb_name: str | None = None - wandb_run_id: str | None = None - wandb_mode: str | None = None - wandb_project: str | None = None - wandb_entity: str | None = None + wandb_name: str | None = Field( + default=None, + json_schema_extra={"description": "Set the name of your wandb run"}, + ) + wandb_run_id: str | None = Field( + default=None, json_schema_extra={"description": "Set the ID of your wandb run"} + ) + wandb_mode: str | None = Field( + default=None, + json_schema_extra={ + "description": '"offline" to save run metadata locally and not sync to the server, "disabled" to turn off wandb' + }, + ) + wandb_project: str | None = Field( + default=None, json_schema_extra={"description": "Your wandb project name"} + ) + wandb_entity: str | None = Field( + default=None, + json_schema_extra={"description": "A wandb Team name if using a Team"}, + ) wandb_watch: str | None = None - wandb_log_model: str | None = None + 
wandb_log_model: str | None = Field( + default=None, + json_schema_extra={ + "description": '"checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only at the end of training' + }, + ) @model_validator(mode="before") @classmethod @@ -64,14 +95,52 @@ class WandbConfig(BaseModel): class CometConfig(BaseModel): """Comet configuration subset""" - use_comet: bool | None = None - comet_api_key: str | None = None - comet_workspace: str | None = None - comet_project_name: str | None = None - comet_experiment_key: str | None = None - comet_mode: str | None = None - comet_online: bool | None = None - comet_experiment_config: dict[str, Any] | None = None + use_comet: bool | None = Field( + default=None, + json_schema_extra={"description": "Enable or disable Comet integration."}, + ) + comet_api_key: str | None = Field( + default=None, + json_schema_extra={ + "description": "API key for Comet. Recommended to set via `comet login`." + }, + ) + comet_workspace: str | None = Field( + default=None, + json_schema_extra={ + "description": "Workspace name in Comet. Defaults to the user's default workspace." + }, + ) + comet_project_name: str | None = Field( + default=None, + json_schema_extra={ + "description": "Project name in Comet. Defaults to Uncategorized." + }, + ) + comet_experiment_key: str | None = Field( + default=None, + json_schema_extra={ + "description": "Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key." + }, + ) + comet_mode: str | None = Field( + default=None, + json_schema_extra={ + "description": 'Create a new experiment ("create") or log to an existing one ("get"). Default ("get_or_create") auto-selects based on configuration.' + }, + ) + comet_online: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Set to True to log data to Comet server, or False for offline storage. Default is True." + }, + ) + comet_experiment_config: dict[str, Any] | None = Field( + default=None, + json_schema_extra={ + "description": "Dictionary for additional configuration settings, see the doc for more details." + }, + ) class GradioConfig(BaseModel): diff --git a/src/axolotl/utils/schemas/model.py b/src/axolotl/utils/schemas/model.py index aafb52152..6f995996d 100644 --- a/src/axolotl/utils/schemas/model.py +++ b/src/axolotl/utils/schemas/model.py @@ -12,20 +12,55 @@ class ModelInputConfig(BaseModel): model_config = {"protected_namespaces": ()} - base_model: str - base_model_config: str | None = None + base_model: str = Field( + json_schema_extra={ + "description": "This is the huggingface model that contains *.pt, *.safetensors, or *.bin files. 
This can also be a relative path to a model on disk" + } + ) + base_model_config: str | None = Field( + default=None, + json_schema_extra={ + "description": "If the base_model repo on hf hub doesn't include configuration .json files, you can set that here, or leave this empty to default to base_model" + }, + ) cls_model_config: str | None = None - tokenizer_config: str | None = None - tokenizer_use_fast: bool | None = None - tokenizer_legacy: bool | None = None - tokenizer_use_mistral_common: bool | None = None + tokenizer_config: str | None = Field( + default=None, + json_schema_extra={ + "description": "Optional tokenizer configuration path in case you want to use a different tokenizer than the one defined in the base model" + }, + ) + tokenizer_use_fast: bool | None = Field( + default=None, + json_schema_extra={ + "description": "use_fast option for tokenizer loading from_pretrained, default to True" + }, + ) + tokenizer_legacy: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Whether to use the legacy tokenizer setting, defaults to True" + }, + ) + tokenizer_use_mistral_common: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Whether to use mistral-common tokenizer. If set to True, it will use the mistral-common tokenizer." + }, + ) tokenizer_type: str | None = Field( - default=None, json_schema_extra={"description": "transformers tokenizer class"} + default=None, + json_schema_extra={ + "description": "Corresponding tokenizer for the model. AutoTokenizer is a good choice" + }, ) processor_type: str | None = Field( default=None, json_schema_extra={"description": "transformers processor class"} ) - trust_remote_code: bool | None = None + trust_remote_code: bool | None = Field( + default=None, + json_schema_extra={"description": "Trust remote code for untrusted source"}, + ) @field_validator("trust_remote_code") @classmethod @@ -40,10 +75,23 @@ class ModelInputConfig(BaseModel): class ModelOutputConfig(BaseModel): """model save configuration subset""" - output_dir: str = Field(default="./model-out") - hub_model_id: str | None = None - hub_strategy: str | None = None - save_safetensors: bool | None = True + output_dir: str = Field( + default="./model-out", + json_schema_extra={"description": "Where to save the full-finetuned model to"}, + ) + hub_model_id: str | None = Field( + default=None, json_schema_extra={"description": "push checkpoints to hub"} + ) + hub_strategy: str | None = Field( + default=None, + json_schema_extra={"description": "how to push checkpoints to hub"}, + ) + save_safetensors: bool | None = Field( + default=True, + json_schema_extra={ + "description": "Save model as safetensors (requires safetensors package).
Default True" + }, + ) class SpecialTokensConfig(BaseModel): diff --git a/src/axolotl/utils/schemas/peft.py b/src/axolotl/utils/schemas/peft.py index 5d408e1fe..4b31ce018 100644 --- a/src/axolotl/utils/schemas/peft.py +++ b/src/axolotl/utils/schemas/peft.py @@ -9,7 +9,7 @@ class LoftQConfig(BaseModel): """LoftQ configuration subset""" loftq_bits: int = Field( - default=4, json_schema_extra={"description": "Quantization bits for LoftQ"} + default=4, json_schema_extra={"description": "typically 4 bits"} ) # loftq_iter: int = Field(default=1, json_schema_extra={"description": "Alternating iterations for LoftQ"}) @@ -17,31 +17,78 @@ class LoftQConfig(BaseModel): class PeftConfig(BaseModel): """peftq configuration subset""" - loftq_config: LoftQConfig | None = None + loftq_config: LoftQConfig | None = Field( + default=None, + json_schema_extra={ + "description": "Configuration options for loftq initialization for LoRA" + }, + ) class LoraConfig(BaseModel): """Peft / LoRA configuration subset""" - load_in_8bit: bool | None = Field(default=False) - load_in_4bit: bool | None = Field(default=False) + load_in_8bit: bool | None = Field( + default=False, + json_schema_extra={ + "description": "This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer" + }, + ) + load_in_4bit: bool | None = Field( + default=False, json_schema_extra={"description": "Use bitsandbytes 4 bit"} + ) - adapter: str | None = None - lora_model_dir: str | None = None + adapter: str | None = Field( + default=None, + json_schema_extra={ + "description": "If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model" + }, + ) + lora_model_dir: str | None = Field( + default=None, + json_schema_extra={ + "description": "If you already have a lora model trained that you want to load, put that here. This means after training, if you want to test the model, you should set this to the value of `output_dir`. Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`." + }, + ) lora_r: int | None = None lora_alpha: int | None = None lora_fan_in_fan_out: bool | None = None lora_target_modules: str | list[str] | None = None - lora_target_linear: bool | None = None - lora_modules_to_save: list[str] | None = None + lora_target_linear: bool | None = Field( + default=None, + json_schema_extra={"description": "If true, will target all linear modules"}, + ) + lora_modules_to_save: list[str] | None = Field( + default=None, + json_schema_extra={ + "description": "If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens. For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models. `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities." 
+ }, + ) lora_dropout: float | None = 0.0 - peft_layers_to_transform: list[int] | None = None + peft_layers_to_transform: list[int] | None = Field( + default=None, + json_schema_extra={ + "description": "The layer indices to transform, otherwise, apply to all layers" + }, + ) peft_layers_pattern: list[str] | None = None peft: PeftConfig | None = None - peft_use_dora: bool | None = None - peft_use_rslora: bool | None = None - peft_layer_replication: list[tuple[int, int]] | None = None - peft_init_lora_weights: bool | str | None = None + peft_use_dora: bool | None = Field( + default=None, json_schema_extra={"description": "Whether to use DoRA."} + ) + peft_use_rslora: bool | None = Field( + default=None, json_schema_extra={"description": "Whether to use RSLoRA."} + ) + peft_layer_replication: list[tuple[int, int]] | None = Field( + default=None, + json_schema_extra={"description": "List of layer indices to replicate."}, + ) + peft_init_lora_weights: bool | str | None = Field( + default=None, + json_schema_extra={ + "description": "How to initialize LoRA weights. Default to True which is MS original implementation." + }, + ) qlora_sharded_model_loading: bool | None = Field( default=False, @@ -49,9 +96,24 @@ class LoraConfig(BaseModel): "description": "load qlora model in sharded format for FSDP using answer.ai technique." }, ) - lora_on_cpu: bool | None = None - gptq: bool | None = None - bnb_config_kwargs: dict[str, Any] | None = None + lora_on_cpu: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge" + }, + ) + gptq: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Whether you are training a 4-bit GPTQ quantized model" + }, + ) + bnb_config_kwargs: dict[str, Any] | None = Field( + default=None, + json_schema_extra={ + "description": "optional overrides to the bnb 4bit quantization configuration" + }, + ) loraplus_lr_ratio: float | None = Field( default=None, @@ -62,7 +124,7 @@ class LoraConfig(BaseModel): loraplus_lr_embedding: float | None = Field( default=1e-6, json_schema_extra={ - "description": "loraplus learning rate for lora embedding layers." + "description": "loraplus learning rate for lora embedding layers. Default value is 1e-6." 
}, ) @@ -125,8 +187,29 @@ class LoraConfig(BaseModel): class ReLoRAConfig(BaseModel): """ReLoRA configuration subset""" - relora_steps: int | None = None - relora_warmup_steps: int | None = None - relora_anneal_steps: int | None = None - relora_prune_ratio: float | None = None - relora_cpu_offload: bool | None = None + relora_steps: int | None = Field( + default=None, + json_schema_extra={"description": "Number of steps per ReLoRA restart"}, + ) + relora_warmup_steps: int | None = Field( + default=None, + json_schema_extra={"description": "Number of per-restart warmup steps"}, + ) + relora_anneal_steps: int | None = Field( + default=None, + json_schema_extra={ + "description": "Number of anneal steps for each relora cycle" + }, + ) + relora_prune_ratio: float | None = Field( + default=None, + json_schema_extra={ + "description": "threshold for optimizer magnitude when pruning" + }, + ) + relora_cpu_offload: bool | None = Field( + default=None, + json_schema_extra={ + "description": "True to perform lora weight merges on cpu during restarts, for modest gpu memory savings" + }, + ) diff --git a/src/axolotl/utils/schemas/quantization.py b/src/axolotl/utils/schemas/quantization.py index fe2cdb1fe..090640c7b 100644 --- a/src/axolotl/utils/schemas/quantization.py +++ b/src/axolotl/utils/schemas/quantization.py @@ -15,17 +15,22 @@ class QATConfig(BaseModel): """ activation_dtype: TorchIntDType | None = Field( - default=None, description="Activation dtype" + default=None, + description='Fake quantization layout to use for activation quantization. Valid options are "int4" and "int8"', ) weight_dtype: TorchIntDType = Field( - default=TorchIntDType.int8, description="Weight dtype" + default=TorchIntDType.int8, + description='Fake quantization layout to use for weight quantization. Valid options are "int4" and "int8"', ) quantize_embedding: bool | None = Field( default=False, description="Quantize embedding" ) - group_size: int | None = Field(default=32, description="Group size") + group_size: int | None = Field( + default=32, + description="The number of elements in each group for per-group fake quantization", + ) fake_quant_after_n_steps: int | None = Field( - default=None, description="Fake quant after n steps" + default=None, description="The number of steps to apply fake quantization after" ) @field_validator("activation_dtype", "weight_dtype", mode="before") @@ -44,15 +49,20 @@ class PTQConfig(BaseModel): """ weight_dtype: TorchIntDType = Field( - default=TorchIntDType.int8, description="Weight dtype" + default=TorchIntDType.int8, + description="Fake quantization layout to use for weight quantization. Valid options are uintX for X in [1, 2, 3, 4, 5, 6, 7], or int4, or int8", ) activation_dtype: TorchIntDType | None = Field( - default=None, description="Activation dtype" + default=None, + description='Fake quantization layout to use for activation quantization. Valid options are "int4" and "int8"', ) quantize_embedding: bool | None = Field( - default=None, description="Quantize embedding" + default=None, description="Whether to quantize the embedding layer." 
+ ) + group_size: int | None = Field( + default=32, + description="The number of elements in each group for per-group fake quantization", ) - group_size: int | None = Field(default=32, description="Group size") @field_validator("activation_dtype", "weight_dtype", mode="before") @classmethod diff --git a/src/axolotl/utils/schemas/training.py b/src/axolotl/utils/schemas/training.py index ad7f899ac..4d88cc9e6 100644 --- a/src/axolotl/utils/schemas/training.py +++ b/src/axolotl/utils/schemas/training.py @@ -23,10 +23,17 @@ class LrGroup(BaseModel): class HyperparametersConfig(BaseModel): """Training hyperparams configuration subset""" - gradient_accumulation_steps: int | None = Field(default=1) + gradient_accumulation_steps: int | None = Field( + default=1, + json_schema_extra={ + "description": "If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps." + }, + ) micro_batch_size: int | None = Field( default=1, - json_schema_extra={"description": "per gpu micro batch size for training"}, + json_schema_extra={ + "description": "The number of samples to include in each batch. This is the number of samples sent to each GPU. Batch size per gpu = micro_batch_size * gradient_accumulation_steps" + }, ) batch_size: int | None = Field( default=None, @@ -41,45 +48,99 @@ class HyperparametersConfig(BaseModel): }, ) - auto_find_batch_size: bool | None = None + auto_find_batch_size: bool | None = Field( + default=None, + json_schema_extra={ + "description": "whether to find batch size that fits in memory. Passed to underlying transformers Trainer" + }, + ) - train_on_inputs: bool | None = False - group_by_length: bool | None = None + train_on_inputs: bool | None = Field( + default=False, + json_schema_extra={ + "description": "Whether to mask out or include the human's prompt from the training labels" + }, + ) + group_by_length: bool | None = Field( + default=None, + json_schema_extra={ + "description": "Group similarly sized data to minimize padding. May be slower to start, as it must download and sort the entire dataset. Note that training loss may have an oscillating pattern with this enabled." + }, + ) learning_rate: str | float embedding_lr: float | None = None embedding_lr_scale: float | None = None - weight_decay: float | None = 0.0 - optimizer: (OptimizerNames | CustomSupportedOptimizers) | None = ( - OptimizerNames.ADAMW_TORCH_FUSED + weight_decay: float | None = Field( + default=0.0, json_schema_extra={"description": "Specify weight decay"} + ) + optimizer: (OptimizerNames | CustomSupportedOptimizers) | None = Field( + default=OptimizerNames.ADAMW_TORCH_FUSED, + json_schema_extra={"description": "Specify optimizer"}, ) optim_args: (str | dict[str, Any]) | None = Field( default=None, - json_schema_extra={"description": "Optional arguments to supply to optimizer."}, + json_schema_extra={ + "description": "Dictionary of arguments to pass to the optimizer" + }, ) optim_target_modules: (list[str] | Literal["all_linear"]) | None = Field( default=None, json_schema_extra={ - "description": "The target modules to optimize, i.e. the module names that you would like to train." + "description": "The target modules to optimize, i.e. 
the module names that you would like to train, right now this is used only for GaLore algorithm" + }, + ) + torchdistx_path: str | None = Field( + default=None, + json_schema_extra={ + "description": "Path to torch distx for optim 'adamw_anyprecision'" }, ) - torchdistx_path: str | None = None lr_scheduler: (SchedulerType | Literal["one_cycle"] | Literal["rex"]) | None = ( SchedulerType.COSINE ) - lr_scheduler_kwargs: dict[str, Any] | None = None + lr_scheduler_kwargs: dict[str, Any] | None = Field( + default=None, + json_schema_extra={ + "description": "Specify a scheduler and kwargs to use with the optimizer" + }, + ) lr_quadratic_warmup: bool | None = None - cosine_min_lr_ratio: float | None = None - cosine_constant_lr_ratio: float | None = None - lr_div_factor: float | None = None + cosine_min_lr_ratio: float | None = Field( + default=None, + json_schema_extra={ + "description": "decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr" + }, + ) + cosine_constant_lr_ratio: float | None = Field( + default=None, + json_schema_extra={ + "description": "freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step" + }, + ) + lr_div_factor: float | None = Field( + default=None, json_schema_extra={"description": "Learning rate div factor"} + ) lr_groups: list[LrGroup] | None = None - adam_epsilon: float | None = None - adam_epsilon2: float | None = None - adam_beta1: float | None = None - adam_beta2: float | None = None - adam_beta3: float | None = None - max_grad_norm: float | None = None + adam_epsilon: float | None = Field( + default=None, json_schema_extra={"description": "adamw hyperparams"} + ) + adam_epsilon2: float | None = Field( + default=None, json_schema_extra={"description": "only used for CAME Optimizer"} + ) + adam_beta1: float | None = Field( + default=None, json_schema_extra={"description": "adamw hyperparams"} + ) + adam_beta2: float | None = Field( + default=None, json_schema_extra={"description": "adamw hyperparams"} + ) + adam_beta3: float | None = Field( + default=None, json_schema_extra={"description": "only used for CAME Optimizer"} + ) + max_grad_norm: float | None = Field( + default=None, json_schema_extra={"description": "Gradient clipping max norm"} + ) num_epochs: float = Field(default=1.0) @field_validator("batch_size") diff --git a/src/axolotl/utils/schemas/trl.py b/src/axolotl/utils/schemas/trl.py index 37b71dba8..d1b18a56e 100644 --- a/src/axolotl/utils/schemas/trl.py +++ b/src/axolotl/utils/schemas/trl.py @@ -10,12 +10,14 @@ class TRLConfig(BaseModel): beta: float | None = Field( default=None, - json_schema_extra={"description": "Beta for RL training"}, + json_schema_extra={ + "description": "Beta parameter for the RL training. Same as `rl_beta`. Use" + }, ) max_completion_length: int | None = Field( default=None, json_schema_extra={ - "description": "Maximum length of the completion for RL training" + "description": "Maximum length of the completion for RL training." 
}, ) @@ -23,81 +25,69 @@ class TRLConfig(BaseModel): # Ref: https://github.com/huggingface/trl/blob/26d86757a7c7e24e397ea44f57ecce6031dfac01/trl/trainer/grpo_config.py#L23 use_vllm: bool = Field( default=False, - json_schema_extra={"description": "Whether to use VLLM for RL training"}, + json_schema_extra={"description": "Whether to use VLLM for RL training."}, ) vllm_server_host: str | None = Field( default="0.0.0.0", # nosec B104 - json_schema_extra={"description": "Host of the vLLM server to connect to"}, + json_schema_extra={"description": "Host of the vLLM server to connect to."}, ) vllm_server_port: int | None = Field( default=8000, - json_schema_extra={"description": "Port of the vLLM server to connect to"}, + json_schema_extra={"description": "Port of the vLLM server to connect to."}, ) vllm_server_timeout: int | None = Field( default=None, json_schema_extra={ - "description": "Total timeout duration in seconds to wait for the vLLM server to be up. If the server is not up " - "after the timeout, a `ConnectionError` is raised." + "description": "Total timeout (in seconds) to wait for the vLLM server to respond." }, ) vllm_guided_decoding_regex: str | None = Field( default=None, - json_schema_extra={ - "description": "Regex for vLLM guided decoding. If `None` (default), guided decoding is disabled." - }, + json_schema_extra={"description": "Regex for vLLM guided decoding."}, ) reward_funcs: list[str] | None = Field( default=None, - json_schema_extra={"description": "List of reward functions to load"}, + json_schema_extra={ + "description": "List of reward functions to load. Paths must be importable from current dir." + }, ) reward_weights: list[float] | None = Field( default=None, json_schema_extra={ - "description": "Weights for each reward function. Must match the number of reward functions." + "description": "List of reward weights for the reward functions." }, ) num_generations: int | None = Field( default=None, - json_schema_extra={ - "description": "Number of generations to sample. The global batch size (num_processes * per_device_batch_size) must be divisible by this value." - }, + json_schema_extra={"description": "Number of generations to sample."}, ) log_completions: bool | None = Field( default=False, - json_schema_extra={"description": "Whether to log completions"}, + json_schema_extra={"description": "Whether to log completions."}, ) num_completions_to_print: int | None = Field( default=None, json_schema_extra={ - "description": "Number of completions to print. If `log_completions` is `True`, this will be the number of completions logged." + "description": "Number of completions to print when log_completions is True." }, ) sync_ref_model: bool | None = Field( default=False, - json_schema_extra={ - "description": ( - "Whether to sync the reference model every `ref_model_sync_steps` " - "steps, using the `ref_model_mixup_alpha` parameter." - ) - }, + json_schema_extra={"description": "Whether to sync the reference model."}, ) ref_model_mixup_alpha: float | None = Field( default=0.9, - json_schema_extra={ - "description": "Mixup alpha for the reference model. Requires `sync_ref_model=True`." - }, + json_schema_extra={"description": "Mixup alpha for the reference model."}, ) ref_model_sync_steps: int | None = Field( default=64, - json_schema_extra={ - "description": "Sync steps for the reference model. Requires `sync_ref_model=True`." 
-        },
+        json_schema_extra={"description": "Sync steps for the reference model."},
     )
     scale_rewards: bool = Field(
         default=True,
         json_schema_extra={
-            "description": "Whether to scale the rewards for GRPO by dividing them by their standard deviation."
+            "description": "Whether to scale rewards by their standard deviation."
         },
     )
@@ -124,13 +114,13 @@ class TRLConfig(BaseModel):
     repetition_penalty: float | None = Field(
         default=None,
         json_schema_extra={
-            "description": "Float that penalizes new tokens based on whether they appear in the prompt and the generated text so far."
+            "description": "Penalty for tokens that appear in prompt and generated text."
         },
     )
     num_iterations: int | None = Field(
         default=None,
         json_schema_extra={
-            "description": "Number of iterations per batch (denoted as μ in the algorithm) for GRPO."
+            "description": "Number of iterations per batch (μ) for GRPO."
         },
     )
     epsilon: float | None = Field(
@@ -152,12 +142,12 @@ class TRLConfig(BaseModel):
     loss_type: str | None = Field(
         default=None,
         json_schema_extra={
-            "description": "Specifies the loss formulation to use. Supported values are `grpo`, `bnpo`, and `dr_grpo`."
+            "description": "Loss formulation to use. Supported values: grpo, bnpo, dr_grpo."
         },
     )
     mask_truncated_completions: bool = Field(
         default=False,
         json_schema_extra={
-            "description": "When enabled, truncated completions are excluded from the loss calculation."
+            "description": "Whether to exclude truncated completions from loss calculation."
         },
     )
diff --git a/src/axolotl/utils/schemas/validation.py b/src/axolotl/utils/schemas/validation.py
new file mode 100644
index 000000000..5a6bf43b3
--- /dev/null
+++ b/src/axolotl/utils/schemas/validation.py
@@ -0,0 +1,1073 @@
+"""Module with validation methods for config pydantic model."""
+
+# pylint: disable=too-many-lines
+
+import logging
+
+from pydantic import (
+    field_validator,
+    model_validator,
+)
+from transformers.utils.import_utils import is_torch_npu_available
+
+from axolotl.utils.schemas.enums import ChatTemplate, RingAttnFunc, RLType
+
+LOG = logging.getLogger(__name__)
+
+SUPPORTED_METRICS = {"sacrebleu", "comet", "ter", "chrf", "perplexity"}
+
+
+class DatasetValidationMixin:
+    """Validation methods related to dataset configuration."""
+
+    @field_validator("seed", mode="after")
+    @classmethod
+    def set_default_seed(cls, seed):
+        if seed is None:
+            LOG.info("`seed` not set in config; setting to 42")
+            seed = 42
+        return seed
+
+    @field_validator("datasets", mode="before")
+    @classmethod
+    def deprecate_sharegpt_datasets(cls, datasets):
+        for _, ds_cfg in enumerate(datasets):
+            ds_type = (
+                ds_cfg.get("type")
+                if isinstance(ds_cfg, dict)
+                else getattr(ds_cfg, "type", None)
+            )
+            if not ds_type:
+                continue
+
+            if isinstance(ds_type, dict):
+                continue
+
+            if isinstance(ds_type, str) and ds_type.startswith("sharegpt"):
+                raise ValueError(
+                    "`type: sharegpt.*` is deprecated. Please use `type: chat_template` instead."
+ ) + + return datasets + + @model_validator(mode="before") + @classmethod + def check_dataset_or_pretraining_dataset(cls, data): + if data.get("datasets") is None and data.get("pretraining_dataset") is None: + raise ValueError("either datasets or pretraining_dataset is required") + return data + + @model_validator(mode="before") + @classmethod + def check_push_ds_auth(cls, data): + if ( + data.get("push_dataset_to_hub") + and data.get("hf_use_auth_token") is not True + ): + raise ValueError( + "Require cfg.hf_use_auth_token to be True for push_dataset_to_hub" + ) + return data + + @model_validator(mode="before") + @classmethod + def check_val_w_test_datasets(cls, data): + if data.get("test_datasets") and data.get("val_set_size"): + raise ValueError( + "non-zero val_set_size should not be used with test_datasets configuration" + ) + return data + + @model_validator(mode="before") + @classmethod + def check_test_datasets_bench(cls, data): + if ( + data.get("do_bench_eval") + and not data.get("test_datasets") + and not data.get("val_set_size") + ): + LOG.warning( + "`do_bench_eval` needs a test dataset to run evals, adding an empty test_dataset." + ) + data["test_datasets"] = [{"path": "axolotl-ai-co/empty-test-ds"}] + return data + + @model_validator(mode="before") + @classmethod + def check_eval_packing(cls, data): + # TODO also should check test_datasets and val_set_size as we can skip + # if there are no eval datasets/splits + if ( + data.get("sample_packing") + and data.get("eval_table_size") + and data.get("eval_sample_packing") is not False + ): + raise ValueError( + "eval_table_size and eval_sample_packing are not supported together with sample_packing. Please set 'eval_sample_packing' to false." + ) + if ( + data.get("sample_packing") + and data.get("eval_sample_packing") is None + and not data.get("eval_table_size") + ): + LOG.info( + "explicitly setting `eval_sample_packing` to match `sample_packing`" + ) + data["eval_sample_packing"] = True + + if ( + data.get("sample_packing") + and data.get("eval_sample_packing") is False + and data.get("remove_unused_columns") is None + ): + LOG.info( + "setting `remove_unused_columns: false` for when sample_packing and eval_sample_packing don't match" + ) + data["remove_unused_columns"] = False + + return data + + @model_validator(mode="before") + @classmethod + def check_mm_prepare(cls, data): + if data.get("skip_prepare_dataset"): + if data.get("remove_unused_columns") is None: + LOG.info( + "setting `remove_unused_columns: false` for skip_prepare_dataset" + ) + data["remove_unused_columns"] = False + + return data + + +class AttentionValidationMixin: + """Validation methods related to attention mechanisms.""" + + @model_validator(mode="before") + @classmethod + def check_attention_fields(cls, data): + fields = ( + "xformers_attention", + "sdp_attention", + "s2_attention", + "flash_attention", + "flex_attention", + ) + non_empty_count = sum(1 for field in fields if data.get(field)) + + if non_empty_count > 1: + raise ValueError(f"Only one of {', '.join(fields)} must be set") + return data + + @model_validator(mode="before") + @classmethod + def check_sample_packing_without_attention(cls, data): + if ( + data.get("sample_packing") + and not data.get("flash_attention") + and not data.get("sdp_attention") + and not data.get("flex_attention") + and not data.get("xformers_attention") + ): + LOG.warning( + "sample_packing without flash, sdp, xformers or flex attention does not handle cross sample decontamination." 
+ ) + return data + + @model_validator(mode="before") + @classmethod + def check_sample_packing_with_s2attn(cls, data): + if data.get("sample_packing") and data.get("s2_attention"): + raise ValueError( + "Received `sample_packing=true` and `s2_attention=true`; however, \ + shifted-sparse attention does not currently support sample packing." + ) + return data + + +class TrainingValidationMixin: + """Validation methods related to training configuration.""" + + @model_validator(mode="before") + @classmethod + def check_batch_size_fields(cls, data): + fields = ("micro_batch_size", "gradient_accumulation_steps", "batch_size") + non_empty_count = sum(1 for field in fields if data.get(field)) + + if non_empty_count < 2: + raise ValueError(f"At least two of {', '.join(fields)} must be set") + return data + + @model_validator(mode="before") + @classmethod + def hint_sample_packing_padding(cls, data): + if data.get("sample_packing"): + pad_to_sequence_len = data.get("pad_to_sequence_len") + if pad_to_sequence_len is False: + LOG.warning( + "`pad_to_sequence_len: true` is recommended when using sample_packing" + ) + elif pad_to_sequence_len is None: + LOG.info( + "Setting `pad_to_sequence_len: true` to prevent memory leaks when sample_packing" + ) + data["pad_to_sequence_len"] = True + return data + + @model_validator(mode="before") + @classmethod + def hint_reward_model_pad(cls, data): + if data.get("reward_model") and not data.get("pad_to_sequence_len"): + LOG.warning( + "`pad_to_sequence_len: true` is recommended when using reward_model" + ) + if data.get("pad_to_sequence_len") is None: + data["pad_to_sequence_len"] = True + return data + + @model_validator(mode="before") + @classmethod + def check_gas_bsz(cls, data): + if data.get("gradient_accumulation_steps") and data.get("batch_size"): + raise ValueError( + "please set only one of gradient_accumulation_steps or batch_size" + ) + return data + + @model_validator(mode="before") + @classmethod + def hint_eval_train_mbsz(cls, data): + if ( + data.get("eval_batch_size") + and data.get("micro_batch_size") + and data.get("eval_batch_size") != data.get("micro_batch_size") + ): + LOG.warning( + "eval_batch_size != micro_batch_size. This can lead to VRAM instability." + ) + return data + + @model_validator(mode="before") + @classmethod + def check_warmup(cls, data): + if data.get("warmup_steps") and data.get("warmup_ratio"): + raise ValueError("warmup_steps and warmup_ratio are mutually exclusive") + return data + + @model_validator(mode="before") + @classmethod + def check_saves(cls, data): + if ( + data.get("save_strategy") + and data.get("save_steps") + and data.get("save_strategy") != "steps" + ): + raise ValueError( + "save_strategy and save_steps mismatch. Please set save_strategy to 'steps' or remove save_steps." + ) + if data.get("saves_per_epoch") and data.get("save_steps"): + raise ValueError( + "save_steps and saves_per_epoch are mutually exclusive and cannot be used together." + ) + return data + + @model_validator(mode="before") + @classmethod + def check_push_save(cls, data): + if data.get("hub_model_id") and ( + data.get("save_strategy") not in ["steps", "epoch", None] + ): + LOG.warning( + "hub_model_id is set without any models being saved. To save a model, set save_strategy." 
+ ) + return data + + @model_validator(mode="before") + @classmethod + def check_evals(cls, data): + if ( + data.get("eval_strategy") + and data.get("eval_steps") + and data.get("eval_strategy") != "steps" + ): + raise ValueError( + "eval_strategy and eval_steps mismatch. Please set eval_strategy to 'steps' or remove eval_steps." + ) + + if ( + data.get("val_set_size") == 0 + and (data.get("eval_steps") or data.get("eval_strategy")) + and not data.get("test_datasets") + and data.get("eval_strategy") != "no" + ): + raise ValueError( + "eval_steps and eval_strategy are not supported with val_set_size == 0" + ) + if data.get("evals_per_epoch") and data.get("eval_steps"): + raise ValueError( + "eval_steps and evals_per_epoch are mutually exclusive and cannot be used together." + ) + if ( + data.get("evals_per_epoch") + and data.get("eval_strategy") + and data.get("eval_strategy") != "steps" + ): + raise ValueError( + "eval_strategy must be empty or set to `steps` when used with evals_per_epoch." + ) + + if data.get("do_bench_eval") and not ( + data.get("evals_per_epoch") or data.get("eval_steps") + ): + raise ValueError( + "do_bench_eval requires evals_per_epoch or eval_steps to be set." + ) + return data + + @model_validator(mode="before") + @classmethod + def check_neftune(cls, data): + if data.get("noisy_embedding_alpha") and not data.get("neftune_noise_alpha"): + data["neftune_noise_alpha"] = data["noisy_embedding_alpha"] + del data["noisy_embedding_alpha"] + elif data.get("noisy_embedding_alpha") and data.get("neftune_noise_alpha"): + raise ValueError( + "noisy_embedding_alpha is deprecated, use neftune_noise_alpha; both are set, please remove the deprecated noisy_embedding_alpha setting" + ) + return data + + @model_validator(mode="after") + def check_fft_possible_bad_config(self): + if ( + # pylint: disable=too-many-boolean-expressions + not (self.bf16 or self.bfloat16) + and (self.fp16 or self.float16) + and not self.adapter + and not self.flash_attention + and self.sample_packing + ): + LOG.warning( + "Full fine tune w/o FA2 w/ sample packing and fp16/float16 is likely to raise errors. Try LoRA." + ) + # ValueError: Attempting to unscale FP16 gradients. + # OR + # RuntimeError: expected mat1 and mat2 to have the same dtype, but got: float != c10::Half + return self + + @model_validator(mode="before") + @classmethod + def check_use_reentrant_mismatch(cls, data): + if ( + data.get("unfrozen_parameters") + and data.get("gradient_checkpointing_kwargs") + and data.get("gradient_checkpointing_kwargs", {}).get("use_reentrant") + is True + ): + # https://github.com/huggingface/transformers/issues/21381 + raise ValueError( + "`use_reentrant` must be false when used with partially frozen model." 
+ ) + return data + + @model_validator(mode="before") + @classmethod + def check_eval_strategy(cls, data): + if ( + data.get("evaluation_strategy") is not None + and data.get("eval_strategy") is None + ): + LOG.info( + "explicitly setting `eval_strategy` from the `evaluation_strategy`" + ) + data["eval_strategy"] = data.get("evaluation_strategy") + return data + + @model_validator(mode="before") + @classmethod + def check_causal_lm_evals(cls, data): + if data.get("do_causal_lm_eval") and data.get("eval_sample_packing"): + raise ValueError( + "do_causal_lm_eval is enabled, eval_sample_packing must be set to False" + ) + + if data.get("eval_causal_lm_metrics"): + if not isinstance(data.get("eval_causal_lm_metrics"), list): + raise ValueError("eval_causal_lm_metrics must be a list") + # only ["sacrebleu", "comet", "ter", "chrf"] supported + if set(data.get("eval_causal_lm_metrics")) - SUPPORTED_METRICS: + raise ValueError( + f"eval_causal_lm_metrics must be one of {SUPPORTED_METRICS}" + ) + return data + + @model_validator(mode="before") + @classmethod + def check_tokenizer_use_mistral_common(cls, data): + if data.get("tokenizer_use_mistral_common") is None: + if any( + "magistral" in name.lower() + for name in [ + data.get("base_model", ""), + data.get("base_model_config", ""), + data.get("tokenizer_config", ""), + ] + ): + LOG.warning( + "tokenizer_use_mistral_common auto inferred to True for Magistral models. Please set it to True explicitly if you want to use mistral-common tokenizer." + ) + data["tokenizer_use_mistral_common"] = True + + return data + + @field_validator("tokenizer_use_mistral_common", mode="after") + @classmethod + def check_mistral_common_import(cls, tokenizer_use_mistral_common): + if tokenizer_use_mistral_common: + try: + import mistral_common # noqa: F401 # pylint:disable=unused-import + except ImportError as exception: + raise ImportError( + "mistral-common is required for mistral models. Please install it with `pip install axolotl` or `pip install -e .`." + ) from exception + + return tokenizer_use_mistral_common + + @model_validator(mode="before") + @classmethod + def check_mistral_common_incompatible_options(cls, data): + if not data.get("tokenizer_use_mistral_common"): + return data + + # NOTE: mistral-common tokenizer is not compatible with editing tokenizer at the moment + + if data.get("added_tokens_overrides"): + raise ValueError( + "added_tokens_overrides is not supported with mistral-common tokenizer" + ) + + if data.get("special_tokens"): + raise ValueError( + "special_tokens override is not supported with mistral-common tokenizer" + ) + + if data.get("tokens"): + raise ValueError( + "tokens override is not supported with mistral-common tokenizer" + ) + + if data.get("chat_template"): + raise ValueError( + "Setting chat_template is not supported with mistral-common tokenizer" + ) + + return data + + +class LoRAValidationMixin: + """Validation methods related to LoRA/QLoRA configuration.""" + + @model_validator(mode="before") + @classmethod + def check_lr_groups(cls, data): + if data.get("lr_groups") and data.get("loraplus_lr_ratio"): + raise ValueError("lr_groups and loraplus_lr_ratio cannot be used together.") + return data + + @model_validator(mode="before") + @classmethod + def check_frozen(cls, data): + if ( + data.get("adapter") + and data.get("peft_layers_to_transform") + and data.get("unfrozen_parameters") + ): + raise ValueError( + "`unfrozen_parameters` used with `peft_layers_to_transform` can have unexpected behavior." 
+ ) + return data + + @model_validator(mode="before") + @classmethod + def check_peft_layers_pattern(cls, data): + if data.get("peft_layers_pattern") and not data.get("peft_layers_to_transform"): + raise ValueError( + "peft_layers_pattern requires peft_layers_to_transform to be set" + ) + return data + + @model_validator(mode="before") + @classmethod + def check_qlora_unsloth(cls, data): + if ( + data.get("unsloth_lora_mlp") + or data.get("unsloth_lora_qkv") + or data.get("unsloth_lora_o") + ): + if data.get("adapter") == "lora" and data.get("load_in_8bit"): + raise ValueError( + "unsloth_lora_mlp, unsloth_lora_qkv, and unsloth_lora_o are not compatible with 8-bit LoRA" + ) + return data + + @model_validator(mode="before") + @classmethod + def check_lora_8bit(cls, data): + if ( + data.get("lora_mlp_kernel") + or data.get("lora_qkv_kernel") + or data.get("lora_o_kernel") + ): + if data.get("adapter") == "lora" and data.get("load_in_8bit"): + raise ValueError( + "lora_mlp_kernel, lora_qkv_kernel, and lora_o_kernel are not compatible with 8-bit LoRA" + ) + return data + + @model_validator(mode="before") + @classmethod + def check_lora_axolotl_unsloth(cls, data): + is_lora_kernel = any( + data.get(k) for k in ["lora_mlp_kernel", "lora_qkv_kernel", "lora_o_kernel"] + ) + is_unsloth_lora = any( + data.get(k) + for k in ["unsloth_lora_mlp", "unsloth_lora_qkv", "unsloth_lora_o"] + ) + if is_lora_kernel and is_unsloth_lora: + raise ValueError( + "both lora_mlp_kernel and unsloth_lora_mlp cannot be true (similarly for lora_qkv_kernel, lora_o_kernel)" + ) + return data + + @model_validator(mode="after") + def check_fused_lora(self): + if self.adapter in ["lora", "qlora"] and ( + self.flash_attn_fuse_qkv or self.flash_attn_fuse_mlp + ): + raise ValueError("Fused modules are not supported with LoRA/QLoRA") + return self + + @model_validator(mode="after") + def hint_lora_8bit(self): + loftq = ( + self.peft and self.peft.loftq_config and self.peft.loftq_config.loftq_bits + ) + if not self.load_in_8bit and self.adapter == "lora" and not loftq: + LOG.warning("We recommend setting `load_in_8bit: true` for LORA finetuning") + return self + + @model_validator(mode="before") + @classmethod + def warn_qlora_zero3_w_use_reentrant(cls, data): + if ( + data.get("adapter") == "qlora" + and data.get("gradient_checkpointing_kwargs", {}) + and data.get("gradient_checkpointing_kwargs", {}).get("use_reentrant") + is False + and data.get("deepspeed", "") is not None + and "zero3" in data.get("deepspeed", "") + ): + # may result in: + # torch.utils.checkpoint.CheckpointError: torch.utils.checkpoint: + # Recomputed values for the following tensors have different metadata + # than during the forward pass. 
+ LOG.warning( + "qlora + zero3 with use_reentrant: false may result in a CheckpointError about recomputed values" + ) + return data + + @model_validator(mode="before") + @classmethod + def check_lora_kernel_8bit(cls, data): + if ( + data.get("lora_mlp_kernel") + or data.get("lora_qkv_kernel") + or data.get("lora_o_kernel") + ): + if data.get("adapter") == "lora" and data.get("load_in_8bit"): + raise ValueError( + "lora_mlp_kernel, lora_qkv_kernel, and lora_o_kernel are not compatible with 8-bit LoRA" + ) + return data + + @model_validator(mode="before") + @classmethod + def check_lora_kernel_rl(cls, data): + if ( + data.get("lora_mlp_kernel") + or data.get("lora_qkv_kernel") + or data.get("lora_o_kernel") + ) and data.get("rl"): + raise ValueError( + "lora_mlp_kernel, lora_qkv_kernel, and lora_o_kernel are not compatible with RL at the moment." + ) + return data + + +class RLValidationMixin: + """Validation methods related to RL training configuration.""" + + @model_validator(mode="before") + @classmethod + def check_sample_packing_w_rl(cls, data): + if data.get("sample_packing") and data.get("rl"): + raise ValueError("`sample_packing: true` does not work with RLHF training") + return data + + @model_validator(mode="before") + @classmethod + def check_kto_config(cls, data): + if data.get("rl") == "kto": + if data.get("sample_packing") or data.get("eval_sample_packing"): + raise ValueError("sample_packing is not supported with kto") + + if data.get("remove_unused_columns") is not False: + raise ValueError("Set `remove_unused_columns: False` when using kto") + return data + + @model_validator(mode="before") + @classmethod + def check_grpo_liger_sequence_parallel(cls, data): + if ( + data.get("rl") == "grpo" + and data.get("trl", {}) + and data.get("trl").get("use_liger_loss") + and data.get("sequence_parallel_degree", 1) > 1 + ): + raise ValueError("GRPO + SP + Liger not currently supported") + return data + + @model_validator(mode="before") + @classmethod + def check_rl_config_gradient_checkpointing(cls, data): + # TODO: SalmanMohammadi + # Distributed RL with QLoRA + gradient checkpointing + # and use_reentrant = True is broken upstream in TRL + # pylint: disable=too-many-boolean-expressions + if ( + data.get("rl") + and data.get("gradient_checkpointing") + and data.get("gradient_checkpointing_kwargs") + and data.get("gradient_checkpointing_kwargs").get("use_reentrant") + and data.get("load_in_4bit") + and data.get("adapter") == "qlora" + and data.get("capabilities") + and data.get("capabilities").get("n_gpu", 1) > 1 + ): + raise ValueError( + "The `use_reentrant: True` implementation of gradient checkpointing " + "is not supported for distributed RL training with QLoRA. Please set " + "`use_reentrant: False` in `gradient_checkpointing_kwargs`." 
+ ) + return data + + +class OptimizationValidationMixin: + """Validation methods related to optimization and performance.""" + + @model_validator(mode="after") + def check_adamw_optimizer_params(self): + if any([self.adam_beta1, self.adam_beta2, self.adam_epsilon]) and ( + not self.optimizer or "adamw" not in str(self.optimizer).lower() + ): + LOG.warning("adamw hyperparameters found, but no adamw optimizer set") + return self + + @model_validator(mode="before") + @classmethod + def check_muon_deepspeed_fsdp(cls, data): + if data.get("optimizer") == "muon" and ( + data.get("deepspeed") or data.get("fsdp") or data.get("fsdp_config") + ): + raise ValueError( + "Muon optimizer is currently incompatible with DeepSpeed and FSDP" + ) + return data + + @model_validator(mode="before") + @classmethod + def check_batch_flattening_fa(cls, data): + if data.get("batch_flattening"): + batch_flattening_auto = data.get("batch_flattening") == "auto" + if not data.get("flash_attention") and not batch_flattening_auto: + raise ValueError("batch_flattening requires flash attention") + if data.get("sample_packing") and not batch_flattening_auto: + raise ValueError("batch_flattening not compatible with sample_packing") + if data.get("micro_batch_size") == 1 and not batch_flattening_auto: + LOG.warning("batch_flattening has no effect with micro_batch_size == 1") + + if ( + batch_flattening_auto + and data.get("flash_attention") + and not data.get("sample_packing") + and data.get("micro_batch_size") > 1 + ): + data["batch_flattening"] = True + elif batch_flattening_auto: + data["batch_flattening"] = False + + return data + + @model_validator(mode="before") + @classmethod + def check_torch_compile_deepspeed(cls, data): + if data.get("deepspeed") and data.get("torch_compile"): + raise ValueError( + "torch_compile should be set within your deepspeed config file" + ) + return data + + @model_validator(mode="before") + @classmethod + def check_xentropy_patch_conflicts(cls, data): + if data.get("flash_attn_cross_entropy") and data.get( + "unsloth_cross_entropy_loss" + ): + raise ValueError( + "flash_attn_cross_entropy and unsloth_cross_entropy_loss cannot be both enabled" + ) + return data + + @model_validator(mode="before") + @classmethod + def check_fsdp_offload_w_8bit_optimizer(cls, data): + if ( + data.get("fsdp") + and "8bit" in data.get("optimizer", "") + and data.get("fsdp_config") + and data["fsdp_config"].get("fsdp_offload_params") + and str(data["fsdp_config"].get("fsdp_version")) != "2" + ): + raise ValueError( + f"FSDP Offload not compatible with {data.get('optimizer')}" + ) + if ( + data.get("fsdp") + and "8bit" in data.get("optimizer", "") + and data.get("fsdp_config") + and str(data["fsdp_config"].get("fsdp_version")) == "2" + ): + if data.get("optimizer", "") in ["adamw_8bit", "adamw_bnb_8bit"]: + # CUDA ops errors with bnb 8bit optimizer + FSDP2 + raise ValueError( + f"FSDP2 not compatible with {data.get('optimizer')}, use `adamw_torch_8bit` instead" + ) + + return data + + @model_validator(mode="before") + @classmethod + def check_fsdp_sharded_state_dict_w_safetensors(cls, data): + if ( + data.get("fsdp") + and data.get("save_safetensors") + and data.get("fsdp_config") + and data["fsdp_config"].get("fsdp_state_dict_type") == "SHARDED_STATE_DICT" + ): + raise ValueError( + "FSDP SHARDED_STATE_DICT not compatible with save_safetensors" + ) + return data + + +class SystemValidationMixin: + """Validation methods related to system and hardware configuration.""" + + @model_validator(mode="before") + 
@classmethod + def check_mem_mismatch(cls, data): + if ( + data.get("max_memory") is not None + and data.get("gpu_memory_limit") is not None + ): + raise ValueError( + "max_memory and gpu_memory_limit are mutually exclusive and cannot be used together." + ) + return data + + @model_validator(mode="before") + @classmethod + def check_fsdp_deepspeed(cls, data): + if data.get("deepspeed") and data.get("fsdp"): + raise ValueError("deepspeed and fsdp cannot be used together.") + return data + + @model_validator(mode="before") + @classmethod + def check_npu_config(cls, data): + if is_torch_npu_available(): + # check attention config + attn_list = ["flash_attention", "sdp_attention", "s2_attention"] + for attn in attn_list: + if data.get(attn): + raise NotImplementedError( + f"{attn} is currently not supported in Ascend npu, please disable this configuration." + ) + + # check quant config + if data.get("optimizer") is not None and "bit" in data.get("optimizer"): + optimizer = data.get("optimizer") + raise NotImplementedError( + f"{optimizer} is currently not supported in Ascend npu, choose another one please." + ) + + quant_list = ["load_in_8bit", "load_in_4bit"] + for quant in quant_list: + if data.get(quant): + raise NotImplementedError( + f"Quantification is currently not supported in Ascend npu, please disable {quant}." + ) + + # check dtype config + if data.get("tf32"): + raise NotImplementedError( + "tf32 dtype is currently not supported in Ascend npu, please disable this configuration" + ) + + return data + + +class ChatTemplateValidationMixin: + """Validation methods related to chat template configuration.""" + + @model_validator(mode="before") + @classmethod + def check_chat_template_config(cls, data): + # if chat_template is set to jinja, chat_template_jinja is required + if data.get("chat_template") == ChatTemplate.jinja and not data.get( + "chat_template_jinja" + ): + raise ValueError( + "chat_template_jinja is required when chat_template is set to jinja" + ) + + # If chat_template_jinja is set, set chat_template to jinja + if data.get("chat_template_jinja") and not data.get("chat_template"): + data["chat_template"] = ChatTemplate.jinja + + return data + + +class PretrainingValidationMixin: + """Validation methods related to pretraining configuration.""" + + @model_validator(mode="before") + @classmethod + def check_pretraining_w_max_steps(cls, data): + if data.get("pretraining_dataset") and not data.get("max_steps"): + raise ValueError( + "max_steps must be set when using iterable pretraining_dataset, Trainer can't infer length and schedule optimizer/learning rate without it!" + ) + return data + + @model_validator(mode="before") + @classmethod + def check_pretraining_w_group_by_length(cls, data): + if data.get("pretraining_dataset") and data.get("group_by_length"): + LOG.warning( + "You probably want to disable group_by_length as it will force a streamed dataset to download completely." 
+ ) + return data + + @model_validator(mode="before") + @classmethod + def check_pretraining_split_batches_accelerate(cls, data): + # alternatively set ACCELERATE_SPLIT_BATCHES=False + if data.get("pretraining_dataset"): + accelerator_config = data.get("accelerator_config", {}) + if not accelerator_config: + data["accelerator_config"] = { + "split_batches": False, + "dispatch_batches": False, + } + else: + if accelerator_config.get("split_batches") is None: + data["accelerator_config"]["split_batches"] = False + if accelerator_config.get("dispatch_batches") is None: + data["accelerator_config"]["dispatch_batches"] = False + return data + + +class ModelCompatibilityValidationMixin: + """Validation methods for specific model compatibility.""" + + @model_validator(mode="after") + def check_falcon_fsdp(self): + if (self.base_model and "falcon" in self.base_model.lower()) and self.fsdp: + raise ValueError("FSDP is not supported for falcon models") + return self + + @model_validator(mode="after") + def check_mpt_checkpointing(self): + if ( + self.base_model and "mpt" in self.base_model.lower() + ) and self.gradient_checkpointing: + raise ValueError("gradient_checkpointing is not supported for MPT models") + return self + + @model_validator(mode="after") + def check_offload_grad_checkpointing(self): + if self.gradient_checkpointing and self.gradient_checkpointing == "unsloth": + LOG.warning( + "`unsloth` is deprecated for gradient_checkpointing, use `offload`" + ) + self.gradient_checkpointing = "offload" + return self + + @model_validator(mode="after") + def check_better_transformers(self): + if self.flash_optimum is True: + if self.adapter: + LOG.warning( + "BetterTransformers probably doesn't work with PEFT adapters" + ) + if self.fp16 or self.bf16: + raise ValueError("AMP is not supported with BetterTransformer") + if self.float16 is not True and self.bfloat16 is not True: + LOG.warning( + "You should probably set bfloat16 or float16 to true to " + "load the model in float16 for BetterTransformers" + ) + return self + + @model_validator(mode="before") + @classmethod + def check_gptq_w_revision(cls, data): + if data.get("gptq") and data.get("revision_of_model"): + raise ValueError( + "revision_of_model is not supported for GPTQ models. " + + "Please download the model from HuggingFace Hub manually for correct branch, " + + "point to its path, and remove revision_of_model from the config." + ) + return data + + +class ComplexValidationMixin: + """Complex validation methods that involve multiple systems.""" + + @field_validator("neftune_noise_alpha") + @classmethod + def validate_neftune_noise_alpha(cls, neftune_noise_alpha): + if neftune_noise_alpha is not None and neftune_noise_alpha <= 0.0: + raise ValueError("neftune_noise_alpha must be > 0.0") + return neftune_noise_alpha + + @model_validator(mode="after") + def check_rl_beta(self): + if self.dpo_beta and not self.rl_beta: + self.rl_beta = self.dpo_beta + del self.dpo_beta + return self + + @model_validator(mode="after") + def check_simpo_warmup(self): + if self.rl is RLType.SIMPO and self.warmup_ratio: + raise ValueError( + "warmup_ratio is not supported with the simpo trainer. 
Please use `warmup_steps` instead" + ) + return self + + @model_validator(mode="after") + def check_relora(self): + if self.relora_steps: + if self.adapter not in ("lora", "qlora"): + raise ValueError("cfg.adapter must be lora or qlora to use ReLoRA") + + if self.fsdp: + raise ValueError("fsdp not supported with ReLoRA") + + if self.deepspeed: + raise ValueError("deepspeed not supported with ReLoRA") + + if self.lr_scheduler == "one_cycle": + raise ValueError( + "ReLoRA is not compatible with the one_cycle scheduler" + ) + + if self.flash_attn_fuse_qkv or self.flash_attn_fuse_mlp: + raise ValueError("Fused modules are not supported with ReLoRA") + return self + + @model_validator(mode="after") + def check_early_stopping(self): + if self.early_stopping_patience: + if not self.save_steps or not self.eval_steps: + raise ValueError( + "`early_stopping_patience` requires save_steps and eval_steps to be set. eval_steps should evenly divide save_steps." + ) + if self.save_steps % self.eval_steps != 0: + raise ValueError( + "`early_stopping_patience` requires that eval_steps should evenly divide save_steps." + ) + return self + + @model_validator(mode="after") + def check_sequence_parallel_degree(self): + if not self.sequence_parallel_degree: + self.sequence_parallel_degree = 1 + elif self.sequence_parallel_degree > 1: + if not self.flash_attention: + raise ValueError( + "flash_attention: true must be set with sequence_parallel_degree > 1" + ) + + if self.sample_packing and self.micro_batch_size > 1: + raise ValueError( + "micro_batch_size must be set to 1 when sample_packing is enabled " + "due to a `ring-flash-attn` requirement" + ) + + try: + import ring_flash_attn # noqa: F401 # pylint:disable=unused-import + except ImportError as exception: + raise ImportError( + "sequence_parallel_degree > 1 but ring_flash_attn is not installed. " + "Please install it with `pip install axolotl[ring-flash-attn] " + "or `pip install ring-flash-attn>=0.1.4`." + ) from exception + + LOG.warning( + "Sequence parallelism (SP) is enabled with " + f"sequence_parallel_degree={self.sequence_parallel_degree}. " + "Please note that logged losses may differ slightly to the non-SP " + "losses due to transformers Trainer implementation details. " + "Please see https://github.com/axolotl-ai-cloud/axolotl/pull/2495#issuecomment-2784022042 " + "for more details." + ) + + return self + + @model_validator(mode="after") + def validate_ring_attn_func(self): + if getattr(self, "sequence_parallel_degree", 1) == 1: + return self + + if self.ring_attn_func is not None: + self.ring_attn_func = RingAttnFunc(self.ring_attn_func) + else: + # Default ring attention function selection + sample_packing = getattr(self, "sample_packing", False) + self.ring_attn_func = ( + RingAttnFunc.VARLEN_LLAMA3 + if sample_packing + else RingAttnFunc.BATCH_RING + ) + + return self + + +# pylint: disable=too-many-ancestors +class ValidationMixin( + DatasetValidationMixin, + AttentionValidationMixin, + TrainingValidationMixin, + LoRAValidationMixin, + RLValidationMixin, + OptimizationValidationMixin, + SystemValidationMixin, + ChatTemplateValidationMixin, + PretrainingValidationMixin, + ModelCompatibilityValidationMixin, + ComplexValidationMixin, +): + """Full validation mixin for Axolotl configuration."""
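The mixins above rely on pydantic v2 picking up `@field_validator`/`@model_validator` methods defined on plain (non-`BaseModel`) classes once a config model inherits from them. A minimal standalone sketch of that pattern, assuming pydantic v2; the `WarmupValidationMixin` and `ToyConfig` names are illustrative only and not part of this patch:

```python
"""Sketch of the mixin-based validation pattern used in validation.py (illustrative only)."""

from pydantic import BaseModel, model_validator


class WarmupValidationMixin:
    """Simplified analogue of TrainingValidationMixin.check_warmup."""

    @model_validator(mode="before")
    @classmethod
    def check_warmup(cls, data):
        # Runs on the raw input dict before field parsing, mirroring the
        # `mode="before"` validators in the real mixins.
        if data.get("warmup_steps") and data.get("warmup_ratio"):
            raise ValueError("warmup_steps and warmup_ratio are mutually exclusive")
        return data


class ToyConfig(WarmupValidationMixin, BaseModel):
    warmup_steps: int | None = None
    warmup_ratio: float | None = None


ToyConfig(warmup_steps=10)  # passes validation
# ToyConfig(warmup_steps=10, warmup_ratio=0.1)  # raises a ValidationError
```

Keeping each concern (datasets, attention, training, LoRA, RL, and so on) in its own mixin keeps the checks reviewable in isolation, while `ValidationMixin` simply composes them for the main config model.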