diff --git a/.nojekyll b/.nojekyll index 0737c307a..5ceb1bacf 100644 --- a/.nojekyll +++ b/.nojekyll @@ -1 +1 @@ -d2901af9 \ No newline at end of file +9973e775 \ No newline at end of file diff --git a/docs/api/cli.args.html b/docs/api/cli.args.html index fe0473e8a..2cec2ec38 100644 --- a/docs/api/cli.args.html +++ b/docs/api/cli.args.html @@ -531,72 +531,67 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

EvaluateCliArgs

cli.args.EvaluateCliArgs(
-    self,
-    debug=False,
-    debug_text_only=False,
-    debug_num_examples=0,
-)
+ debug=False, + debug_text_only=False, + debug_num_examples=0, +)

Dataclass with CLI arguments for axolotl evaluate command.

InferenceCliArgs

-
cli.args.InferenceCliArgs(self, prompter=None)
+
cli.args.InferenceCliArgs(prompter=None)

Dataclass with CLI arguments for axolotl inference command.

PreprocessCliArgs

cli.args.PreprocessCliArgs(
-    self,
-    debug=False,
-    debug_text_only=False,
-    debug_num_examples=1,
-    prompter=None,
-    download=True,
-    iterable=None,
-)
+ debug=False, + debug_text_only=False, + debug_num_examples=1, + prompter=None, + download=True, + iterable=None, +)

Dataclass with CLI arguments for axolotl preprocess command.

QuantizeCliArgs

cli.args.QuantizeCliArgs(
-    self,
-    base_model=None,
-    weight_dtype=None,
-    activation_dtype=None,
-    quantize_embedding=None,
-    group_size=None,
-    output_dir=None,
-)
+ base_model=None, + weight_dtype=None, + activation_dtype=None, + quantize_embedding=None, + group_size=None, + output_dir=None, +)

Dataclass with CLI arguments for axolotl quantize command.

TrainerCliArgs

cli.args.TrainerCliArgs(
-    self,
-    debug=False,
-    debug_text_only=False,
-    debug_num_examples=0,
-    merge_lora=False,
-    prompter=None,
-    shard=False,
-    main_process_port=None,
-    num_processes=None,
-)
+ debug=False, + debug_text_only=False, + debug_num_examples=0, + merge_lora=False, + prompter=None, + shard=False, + main_process_port=None, + num_processes=None, +)

Dataclass with CLI arguments for axolotl train command.

VllmServeCliArgs

cli.args.VllmServeCliArgs(
-    self,
-    tensor_parallel_size=None,
-    host=None,
-    port=None,
-    gpu_memory_utilization=None,
-    dtype=None,
-    max_model_len=None,
-    enable_prefix_caching=None,
-    serve_module=None,
-)
+ tensor_parallel_size=None, + host=None, + port=None, + gpu_memory_utilization=None, + dtype=None, + max_model_len=None, + enable_prefix_caching=None, + serve_module=None, +)

Dataclass with CLI arguments for axolotl vllm-serve command.

diff --git a/docs/api/cli.cloud.modal_.html b/docs/api/cli.cloud.modal_.html index 0f6f6e40c..fda806f09 100644 --- a/docs/api/cli.cloud.modal_.html +++ b/docs/api/cli.cloud.modal_.html @@ -509,7 +509,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

ModalCloud

-
cli.cloud.modal_.ModalCloud(self, config, app=None)
+
cli.cloud.modal_.ModalCloud(config, app=None)

Modal Cloud implementation.

diff --git a/docs/api/common.datasets.html b/docs/api/common.datasets.html index 52ca199c5..b9d0971ef 100644 --- a/docs/api/common.datasets.html +++ b/docs/api/common.datasets.html @@ -512,11 +512,10 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

TrainDatasetMeta

common.datasets.TrainDatasetMeta(
-    self,
-    train_dataset,
-    eval_dataset=None,
-    total_num_steps=None,
-)
+ train_dataset, + eval_dataset=None, + total_num_steps=None, +)

Dataclass with fields for training and validation datasets and metadata.

diff --git a/docs/api/convert.html b/docs/api/convert.html index f36be717b..b6afecd80 100644 --- a/docs/api/convert.html +++ b/docs/api/convert.html @@ -535,7 +535,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

FileWriter

-
convert.FileWriter(self, file_path)
+
convert.FileWriter(file_path)

Writes a string to a file

@@ -546,12 +546,11 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

JsonToJsonlConverter

convert.JsonToJsonlConverter(
-    self,
-    file_reader,
-    file_writer,
-    json_parser,
-    jsonl_serializer,
-)
+ file_reader, + file_writer, + json_parser, + jsonl_serializer, +)

Converts a JSON file to JSONL

diff --git a/docs/api/core.builders.base.html b/docs/api/core.builders.base.html new file mode 100644 index 000000000..d5f81a26d --- /dev/null +++ b/docs/api/core.builders.base.html @@ -0,0 +1,943 @@ + + + + + + + + + +core.builders.base – Axolotl + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ + + + +
+

core.builders.base

+

core.builders.base

+

Base class for trainer builder

+
+

Classes

+ + + + + + + + + + + + + +
NameDescription
TrainerBuilderBaseBase class for trainer builder.
+
+

TrainerBuilderBase

+
core.builders.base.TrainerBuilderBase(cfg, model, tokenizer, processor=None)
+

Base class for trainer builder.

+
+

Methods

+ + + + + + + + + + + + + +
NameDescription
get_post_trainer_create_callbacksCallbacks added after the trainer is created, usually b/c these need access to the trainer
+
+
get_post_trainer_create_callbacks
+
core.builders.base.TrainerBuilderBase.get_post_trainer_create_callbacks(trainer)
+

Callbacks added after the trainer is created, usually b/c these need access to the trainer

+ + +
+
+
+
+
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/docs/api/core.trainer_builder.html b/docs/api/core.builders.causal.html similarity index 83% rename from docs/api/core.trainer_builder.html rename to docs/api/core.builders.causal.html index 4c7c3ca99..dce13980e 100644 --- a/docs/api/core.trainer_builder.html +++ b/docs/api/core.builders.causal.html @@ -7,7 +7,7 @@ -core.trainer_builder – Axolotl +core.builders.causal – Axolotl + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ + + + +
+

core.builders.rl

+

core.builders.rl

+

Builder for RLHF trainers

+
+

Classes

+ + + + + + + + + + + + + + + + + +
NameDescription
HFPPOTrainerBuilderHF Factory class for PPO Trainer
HFRLTrainerBuilderTrainer factory class for TRL-based RLHF trainers (e.g. DPO)
+
+

HFPPOTrainerBuilder

+
core.builders.rl.HFPPOTrainerBuilder(cfg, model, tokenizer, processor=None)
+

HF Factory class for PPO Trainer

+
+
+

HFRLTrainerBuilder

+
core.builders.rl.HFRLTrainerBuilder(cfg, model, tokenizer, processor=None)
+

Trainer factory class for TRL-based RLHF trainers (e.g. DPO)

+ + +
+
+
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/docs/api/core.datasets.chat.html b/docs/api/core.datasets.chat.html index 224939abc..0de1fd923 100644 --- a/docs/api/core.datasets.chat.html +++ b/docs/api/core.datasets.chat.html @@ -506,16 +506,15 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

TokenizedChatDataset

core.datasets.chat.TokenizedChatDataset(
-    self,
-    data,
-    model_transform,
-    *args,
-    message_transform=None,
-    formatter=None,
-    process_count=None,
-    keep_in_memory=False,
-    **kwargs,
-)
+ data, + model_transform, + *args, + message_transform=None, + formatter=None, + process_count=None, + keep_in_memory=False, + **kwargs, +)

Tokenized chat dataset

diff --git a/docs/api/core.trainers.base.html b/docs/api/core.trainers.base.html index 594871775..125e1c237 100644 --- a/docs/api/core.trainers.base.html +++ b/docs/api/core.trainers.base.html @@ -506,13 +506,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

AxolotlTrainer

core.trainers.base.AxolotlTrainer(
-    self,
-    *_args,
-    bench_data_collator=None,
-    eval_data_collator=None,
-    dataset_tags=None,
-    **kwargs,
-)
+ *_args, + bench_data_collator=None, + eval_data_collator=None, + dataset_tags=None, + **kwargs, +)

Extend the base Trainer for axolotl helpers

Methods

diff --git a/docs/api/core.trainers.dpo.trainer.html b/docs/api/core.trainers.dpo.trainer.html index 310da8648..1588f3fa8 100644 --- a/docs/api/core.trainers.dpo.trainer.html +++ b/docs/api/core.trainers.dpo.trainer.html @@ -505,12 +505,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

AxolotlDPOTrainer

-
core.trainers.dpo.trainer.AxolotlDPOTrainer(
-    self,
-    *args,
-    dataset_tags=None,
-    **kwargs,
-)
+
core.trainers.dpo.trainer.AxolotlDPOTrainer(*args, dataset_tags=None, **kwargs)

Extend the base DPOTrainer for axolotl helpers.

Methods

diff --git a/docs/api/core.trainers.grpo.sampler.html b/docs/api/core.trainers.grpo.sampler.html index a157e94f6..6cc440d28 100644 --- a/docs/api/core.trainers.grpo.sampler.html +++ b/docs/api/core.trainers.grpo.sampler.html @@ -509,18 +509,17 @@ sequence parallel group.

SequenceParallelRepeatRandomSampler

core.trainers.grpo.sampler.SequenceParallelRepeatRandomSampler(
-    self,
-    dataset,
-    mini_repeat_count,
-    world_size,
-    rank,
-    batch_size=1,
-    repeat_count=1,
-    sequence_parallel_degree=1,
-    shuffle=True,
-    seed=0,
-    drop_last=False,
-)
+ dataset, + mini_repeat_count, + world_size, + rank, + batch_size=1, + repeat_count=1, + sequence_parallel_degree=1, + shuffle=True, + seed=0, + drop_last=False, +)

Sampler for GRPO training with sequence parallelism.

This sampler ensures: - Ranks in the same sequence parallel (SP) group receive identical data. diff --git a/docs/api/core.trainers.grpo.trainer.html b/docs/api/core.trainers.grpo.trainer.html index ae54824a7..84af6d59e 100644 --- a/docs/api/core.trainers.grpo.trainer.html +++ b/docs/api/core.trainers.grpo.trainer.html @@ -511,17 +511,17 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

AxolotlGRPOSequenceParallelTrainer

core.trainers.grpo.trainer.AxolotlGRPOSequenceParallelTrainer(
-    self,
-    model,
-    reward_funcs,
-    args=None,
-    train_dataset=None,
-    eval_dataset=None,
-    processing_class=None,
-    reward_processing_classes=None,
-    callbacks=None,
-    optimizers=(None, None),
-    peft_config=None,
+    model,
+    reward_funcs,
+    args=None,
+    train_dataset=None,
+    eval_dataset=None,
+    processing_class=None,
+    reward_processing_classes=None,
+    callbacks=None,
+    optimizers=(None, None),
+    peft_config=None,
+    optimizer_cls_and_kwargs=None,
 )

Extend the base GRPOTrainer for sequence parallelism handling

@@ -550,7 +550,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

AxolotlGRPOTrainer

-
core.trainers.grpo.trainer.AxolotlGRPOTrainer()
+
core.trainers.grpo.trainer.AxolotlGRPOTrainer(*args, **kwargs)

Extend the base GRPOTrainer for axolotl helpers

diff --git a/docs/api/core.trainers.mamba.html b/docs/api/core.trainers.mamba.html index 4860d17ef..053aee2fb 100644 --- a/docs/api/core.trainers.mamba.html +++ b/docs/api/core.trainers.mamba.html @@ -506,13 +506,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

AxolotlMambaTrainer

core.trainers.mamba.AxolotlMambaTrainer(
-    self,
-    *_args,
-    bench_data_collator=None,
-    eval_data_collator=None,
-    dataset_tags=None,
-    **kwargs,
-)
+ *_args, + bench_data_collator=None, + eval_data_collator=None, + dataset_tags=None, + **kwargs, +)

Mamba specific trainer to handle loss calculation

diff --git a/docs/api/core.trainers.mixins.optimizer.html b/docs/api/core.trainers.mixins.optimizer.html index 78a3bc055..4f554b267 100644 --- a/docs/api/core.trainers.mixins.optimizer.html +++ b/docs/api/core.trainers.mixins.optimizer.html @@ -471,6 +471,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); @@ -498,14 +499,24 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); +OptimizerInitMixin +Mixin to handle common optimizer initialization logic for Trainers (mostly TRL) that do not + + OptimizerMixin Mixin class for shared handling of building custom optimizers +
+

OptimizerInitMixin

+
core.trainers.mixins.optimizer.OptimizerInitMixin(*args, **kwargs)
+

Mixin to handle common optimizer initialization logic for Trainers (mostly TRL) that do not +accept optimizer_cls_and_kwargs as kwarg in constructor.

+

OptimizerMixin

-
core.trainers.mixins.optimizer.OptimizerMixin()
+
core.trainers.mixins.optimizer.OptimizerMixin()

Mixin class for shared handling of building custom optimizers

diff --git a/docs/api/core.trainers.relora.html b/docs/api/core.trainers.relora.html index bd0175b57..9feaf7975 100644 --- a/docs/api/core.trainers.relora.html +++ b/docs/api/core.trainers.relora.html @@ -505,7 +505,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

ReLoRATrainer

-
core.trainers.relora.ReLoRATrainer(self, *args, **kwargs)
+
core.trainers.relora.ReLoRATrainer(*args, **kwargs)

Trainer subclass that uses the OneCycleLR scheduler

diff --git a/docs/api/core.trainers.trl.html b/docs/api/core.trainers.trl.html index 3e7e7d4b8..ec001381b 100644 --- a/docs/api/core.trainers.trl.html +++ b/docs/api/core.trainers.trl.html @@ -530,84 +530,32 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

AxolotlCPOTrainer

-
core.trainers.trl.AxolotlCPOTrainer()
+
core.trainers.trl.AxolotlCPOTrainer(*args, **kwargs)

Extend the base CPOTrainer for axolotl helpers

-
-

Methods

- - - - - - - - - - - - - -
NameDescription
get_batch_loss_metricsCompute the CPO loss and other metrics for the given batch of inputs for train or test.
-
-
get_batch_loss_metrics
-
core.trainers.trl.AxolotlCPOTrainer.get_batch_loss_metrics(
-    model,
-    batch,
-    train_eval='train',
-)
-

Compute the CPO loss and other metrics for the given batch of inputs for train or test.

-
-

AxolotlKTOTrainer

-
core.trainers.trl.AxolotlKTOTrainer()
+
core.trainers.trl.AxolotlKTOTrainer(*args, **kwargs)

Extend the base KTOTrainer for axolotl helpers

AxolotlORPOTrainer

-
core.trainers.trl.AxolotlORPOTrainer()
+
core.trainers.trl.AxolotlORPOTrainer(*args, **kwargs)

Extend the base ORPOTrainer for axolotl helpers

-
-

Methods

- - - - - - - - - - - - - -
NameDescription
get_batch_loss_metricsCompute the ORPO loss and other metrics for the given batch of inputs for train or test.
-
-
get_batch_loss_metrics
-
core.trainers.trl.AxolotlORPOTrainer.get_batch_loss_metrics(
-    model,
-    batch,
-    train_eval='train',
-)
-

Compute the ORPO loss and other metrics for the given batch of inputs for train or test.

-
-

AxolotlPRMTrainer

-
core.trainers.trl.AxolotlPRMTrainer()
+
core.trainers.trl.AxolotlPRMTrainer(*args, **kwargs)

Extend the base trl.PRMTrainer for axolotl helpers

AxolotlRewardTrainer

-
core.trainers.trl.AxolotlRewardTrainer()
+
core.trainers.trl.AxolotlRewardTrainer(*args, **kwargs)

Extend the base RewardTrainer for axolotl helpers

TRLPPOTrainer

-
core.trainers.trl.TRLPPOTrainer()
+
core.trainers.trl.TRLPPOTrainer()

Wrapper for TRL PPO trainer to handle customizations

diff --git a/docs/api/core.training_args.html b/docs/api/core.training_args.html index a3c553073..a9aeefb93 100644 --- a/docs/api/core.training_args.html +++ b/docs/api/core.training_args.html @@ -536,326 +536,314 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

AxolotlCPOConfig

core.training_args.AxolotlCPOConfig(
-    self,
-    model_type=None,
-    lr_quadratic_warmup=False,
-    pretraining=False,
-    sample_packing=False,
-    sample_packing_sequentially=False,
-    multipack_real_batches=False,
-    eval_sample_packing=None,
-    sample_packing_efficiency=1.0,
-    sample_packing_bin_size=200,
-    sample_packing_group_size=100000,
-    max_seq_length=2048,
-    relora_steps=None,
-    relora_warmup_steps=None,
-    relora_anneal_steps=None,
-    relora_prune_ratio=0.9,
-    bench_split='eval',
-    bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',
-    do_bench_eval=False,
-    do_causal_lm_eval=False,
-    max_bench_samples=None,
-    bench_source_max_len=2048,
-    dataloader_prefetch_factor=None,
-    cosine_min_lr_ratio=None,
-    cosine_constant_lr_ratio=None,
-    loraplus_lr_ratio=None,
-    loraplus_lr_embedding=1e-06,
-    embedding_lr_scale=None,
-    lr_groups=None,
-    embedding_lr=None,
-    qlora=False,
-    orpo_alpha=None,
-    lisa_n_layers=None,
-    lisa_step_interval=None,
-    lisa_layers_attribute=None,
-    curriculum_sampling=None,
-    alternate_optimizer=None,
-    alternate_lr_scheduler_type=None,
-    chat_template=None,
-    kd_ce_alpha=None,
-    kd_alpha=1.0,
-    kd_temperature=1.0,
-    kd_zscore_base_temp=None,
-    kd_top_k_before_softmax=None,
-    adam_beta3=None,
-    adam_epsilon2=None,
-    image_size=None,
-    image_resize_algorithm=None,
-    simpo_gamma=None,
-)
+ model_type=None, + lr_quadratic_warmup=False, + pretraining=False, + sample_packing=False, + sample_packing_sequentially=False, + multipack_real_batches=False, + eval_sample_packing=None, + sample_packing_efficiency=1.0, + sample_packing_bin_size=200, + sample_packing_group_size=100000, + max_seq_length=2048, + relora_steps=None, + relora_warmup_steps=None, + relora_anneal_steps=None, + relora_prune_ratio=0.9, + bench_split='eval', + bench_dataset='pharaouk/dharma-1/dharma_1_mini.json', + do_bench_eval=False, + do_causal_lm_eval=False, + max_bench_samples=None, + bench_source_max_len=2048, + dataloader_prefetch_factor=None, + cosine_min_lr_ratio=None, + cosine_constant_lr_ratio=None, + loraplus_lr_ratio=None, + loraplus_lr_embedding=1e-06, + embedding_lr_scale=None, + lr_groups=None, + embedding_lr=None, + qlora=False, + orpo_alpha=None, + lisa_n_layers=None, + lisa_step_interval=None, + lisa_layers_attribute=None, + curriculum_sampling=None, + alternate_lr_scheduler_type=None, + chat_template=None, + kd_ce_alpha=None, + kd_alpha=1.0, + kd_temperature=1.0, + kd_zscore_base_temp=None, + kd_top_k_before_softmax=None, + adam_beta3=None, + adam_epsilon2=None, + image_size=None, + image_resize_algorithm=None, + simpo_gamma=None, +)

CPO config for CPO training

AxolotlKTOConfig

core.training_args.AxolotlKTOConfig(
-    self,
-    model_type=None,
-    lr_quadratic_warmup=False,
-    pretraining=False,
-    sample_packing=False,
-    sample_packing_sequentially=False,
-    multipack_real_batches=False,
-    eval_sample_packing=None,
-    sample_packing_efficiency=1.0,
-    sample_packing_bin_size=200,
-    sample_packing_group_size=100000,
-    max_seq_length=2048,
-    relora_steps=None,
-    relora_warmup_steps=None,
-    relora_anneal_steps=None,
-    relora_prune_ratio=0.9,
-    bench_split='eval',
-    bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',
-    do_bench_eval=False,
-    do_causal_lm_eval=False,
-    max_bench_samples=None,
-    bench_source_max_len=2048,
-    dataloader_prefetch_factor=None,
-    cosine_min_lr_ratio=None,
-    cosine_constant_lr_ratio=None,
-    loraplus_lr_ratio=None,
-    loraplus_lr_embedding=1e-06,
-    embedding_lr_scale=None,
-    lr_groups=None,
-    embedding_lr=None,
-    qlora=False,
-    orpo_alpha=None,
-    lisa_n_layers=None,
-    lisa_step_interval=None,
-    lisa_layers_attribute=None,
-    curriculum_sampling=None,
-    alternate_optimizer=None,
-    alternate_lr_scheduler_type=None,
-    chat_template=None,
-    kd_ce_alpha=None,
-    kd_alpha=1.0,
-    kd_temperature=1.0,
-    kd_zscore_base_temp=None,
-    kd_top_k_before_softmax=None,
-    adam_beta3=None,
-    adam_epsilon2=None,
-    image_size=None,
-    image_resize_algorithm=None,
-)
+ model_type=None, + lr_quadratic_warmup=False, + pretraining=False, + sample_packing=False, + sample_packing_sequentially=False, + multipack_real_batches=False, + eval_sample_packing=None, + sample_packing_efficiency=1.0, + sample_packing_bin_size=200, + sample_packing_group_size=100000, + max_seq_length=2048, + relora_steps=None, + relora_warmup_steps=None, + relora_anneal_steps=None, + relora_prune_ratio=0.9, + bench_split='eval', + bench_dataset='pharaouk/dharma-1/dharma_1_mini.json', + do_bench_eval=False, + do_causal_lm_eval=False, + max_bench_samples=None, + bench_source_max_len=2048, + dataloader_prefetch_factor=None, + cosine_min_lr_ratio=None, + cosine_constant_lr_ratio=None, + loraplus_lr_ratio=None, + loraplus_lr_embedding=1e-06, + embedding_lr_scale=None, + lr_groups=None, + embedding_lr=None, + qlora=False, + orpo_alpha=None, + lisa_n_layers=None, + lisa_step_interval=None, + lisa_layers_attribute=None, + curriculum_sampling=None, + alternate_lr_scheduler_type=None, + chat_template=None, + kd_ce_alpha=None, + kd_alpha=1.0, + kd_temperature=1.0, + kd_zscore_base_temp=None, + kd_top_k_before_softmax=None, + adam_beta3=None, + adam_epsilon2=None, + image_size=None, + image_resize_algorithm=None, +)

KTO config for KTO training

AxolotlORPOConfig

core.training_args.AxolotlORPOConfig(
-    self,
-    model_type=None,
-    lr_quadratic_warmup=False,
-    pretraining=False,
-    sample_packing=False,
-    sample_packing_sequentially=False,
-    multipack_real_batches=False,
-    eval_sample_packing=None,
-    sample_packing_efficiency=1.0,
-    sample_packing_bin_size=200,
-    sample_packing_group_size=100000,
-    max_seq_length=2048,
-    relora_steps=None,
-    relora_warmup_steps=None,
-    relora_anneal_steps=None,
-    relora_prune_ratio=0.9,
-    bench_split='eval',
-    bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',
-    do_bench_eval=False,
-    do_causal_lm_eval=False,
-    max_bench_samples=None,
-    bench_source_max_len=2048,
-    dataloader_prefetch_factor=None,
-    cosine_min_lr_ratio=None,
-    cosine_constant_lr_ratio=None,
-    loraplus_lr_ratio=None,
-    loraplus_lr_embedding=1e-06,
-    embedding_lr_scale=None,
-    lr_groups=None,
-    embedding_lr=None,
-    qlora=False,
-    orpo_alpha=None,
-    lisa_n_layers=None,
-    lisa_step_interval=None,
-    lisa_layers_attribute=None,
-    curriculum_sampling=None,
-    alternate_optimizer=None,
-    alternate_lr_scheduler_type=None,
-    chat_template=None,
-    kd_ce_alpha=None,
-    kd_alpha=1.0,
-    kd_temperature=1.0,
-    kd_zscore_base_temp=None,
-    kd_top_k_before_softmax=None,
-    adam_beta3=None,
-    adam_epsilon2=None,
-    image_size=None,
-    image_resize_algorithm=None,
-)
+ model_type=None, + lr_quadratic_warmup=False, + pretraining=False, + sample_packing=False, + sample_packing_sequentially=False, + multipack_real_batches=False, + eval_sample_packing=None, + sample_packing_efficiency=1.0, + sample_packing_bin_size=200, + sample_packing_group_size=100000, + max_seq_length=2048, + relora_steps=None, + relora_warmup_steps=None, + relora_anneal_steps=None, + relora_prune_ratio=0.9, + bench_split='eval', + bench_dataset='pharaouk/dharma-1/dharma_1_mini.json', + do_bench_eval=False, + do_causal_lm_eval=False, + max_bench_samples=None, + bench_source_max_len=2048, + dataloader_prefetch_factor=None, + cosine_min_lr_ratio=None, + cosine_constant_lr_ratio=None, + loraplus_lr_ratio=None, + loraplus_lr_embedding=1e-06, + embedding_lr_scale=None, + lr_groups=None, + embedding_lr=None, + qlora=False, + orpo_alpha=None, + lisa_n_layers=None, + lisa_step_interval=None, + lisa_layers_attribute=None, + curriculum_sampling=None, + alternate_lr_scheduler_type=None, + chat_template=None, + kd_ce_alpha=None, + kd_alpha=1.0, + kd_temperature=1.0, + kd_zscore_base_temp=None, + kd_top_k_before_softmax=None, + adam_beta3=None, + adam_epsilon2=None, + image_size=None, + image_resize_algorithm=None, +)

ORPO config for ORPO training

AxolotlPRMConfig

core.training_args.AxolotlPRMConfig(
-    self,
-    model_type=None,
-    lr_quadratic_warmup=False,
-    pretraining=False,
-    sample_packing=False,
-    sample_packing_sequentially=False,
-    multipack_real_batches=False,
-    eval_sample_packing=None,
-    sample_packing_efficiency=1.0,
-    sample_packing_bin_size=200,
-    sample_packing_group_size=100000,
-    max_seq_length=2048,
-    relora_steps=None,
-    relora_warmup_steps=None,
-    relora_anneal_steps=None,
-    relora_prune_ratio=0.9,
-    bench_split='eval',
-    bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',
-    do_bench_eval=False,
-    do_causal_lm_eval=False,
-    max_bench_samples=None,
-    bench_source_max_len=2048,
-    dataloader_prefetch_factor=None,
-    cosine_min_lr_ratio=None,
-    cosine_constant_lr_ratio=None,
-    loraplus_lr_ratio=None,
-    loraplus_lr_embedding=1e-06,
-    embedding_lr_scale=None,
-    lr_groups=None,
-    embedding_lr=None,
-    qlora=False,
-    orpo_alpha=None,
-    lisa_n_layers=None,
-    lisa_step_interval=None,
-    lisa_layers_attribute=None,
-    curriculum_sampling=None,
-    alternate_optimizer=None,
-    alternate_lr_scheduler_type=None,
-    chat_template=None,
-    kd_ce_alpha=None,
-    kd_alpha=1.0,
-    kd_temperature=1.0,
-    kd_zscore_base_temp=None,
-    kd_top_k_before_softmax=None,
-    adam_beta3=None,
-    adam_epsilon2=None,
-    image_size=None,
-    image_resize_algorithm=None,
-)
+ model_type=None, + lr_quadratic_warmup=False, + pretraining=False, + sample_packing=False, + sample_packing_sequentially=False, + multipack_real_batches=False, + eval_sample_packing=None, + sample_packing_efficiency=1.0, + sample_packing_bin_size=200, + sample_packing_group_size=100000, + max_seq_length=2048, + relora_steps=None, + relora_warmup_steps=None, + relora_anneal_steps=None, + relora_prune_ratio=0.9, + bench_split='eval', + bench_dataset='pharaouk/dharma-1/dharma_1_mini.json', + do_bench_eval=False, + do_causal_lm_eval=False, + max_bench_samples=None, + bench_source_max_len=2048, + dataloader_prefetch_factor=None, + cosine_min_lr_ratio=None, + cosine_constant_lr_ratio=None, + loraplus_lr_ratio=None, + loraplus_lr_embedding=1e-06, + embedding_lr_scale=None, + lr_groups=None, + embedding_lr=None, + qlora=False, + orpo_alpha=None, + lisa_n_layers=None, + lisa_step_interval=None, + lisa_layers_attribute=None, + curriculum_sampling=None, + alternate_lr_scheduler_type=None, + chat_template=None, + kd_ce_alpha=None, + kd_alpha=1.0, + kd_temperature=1.0, + kd_zscore_base_temp=None, + kd_top_k_before_softmax=None, + adam_beta3=None, + adam_epsilon2=None, + image_size=None, + image_resize_algorithm=None, +)

PRM config for PRM training

AxolotlRewardConfig

core.training_args.AxolotlRewardConfig(
-    self,
-    model_type=None,
-    lr_quadratic_warmup=False,
-    pretraining=False,
-    sample_packing=False,
-    sample_packing_sequentially=False,
-    multipack_real_batches=False,
-    eval_sample_packing=None,
-    sample_packing_efficiency=1.0,
-    sample_packing_bin_size=200,
-    sample_packing_group_size=100000,
-    max_seq_length=2048,
-    relora_steps=None,
-    relora_warmup_steps=None,
-    relora_anneal_steps=None,
-    relora_prune_ratio=0.9,
-    bench_split='eval',
-    bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',
-    do_bench_eval=False,
-    do_causal_lm_eval=False,
-    max_bench_samples=None,
-    bench_source_max_len=2048,
-    dataloader_prefetch_factor=None,
-    cosine_min_lr_ratio=None,
-    cosine_constant_lr_ratio=None,
-    loraplus_lr_ratio=None,
-    loraplus_lr_embedding=1e-06,
-    embedding_lr_scale=None,
-    lr_groups=None,
-    embedding_lr=None,
-    qlora=False,
-    orpo_alpha=None,
-    lisa_n_layers=None,
-    lisa_step_interval=None,
-    lisa_layers_attribute=None,
-    curriculum_sampling=None,
-    alternate_optimizer=None,
-    alternate_lr_scheduler_type=None,
-    chat_template=None,
-    kd_ce_alpha=None,
-    kd_alpha=1.0,
-    kd_temperature=1.0,
-    kd_zscore_base_temp=None,
-    kd_top_k_before_softmax=None,
-    adam_beta3=None,
-    adam_epsilon2=None,
-    image_size=None,
-    image_resize_algorithm=None,
-)
+ model_type=None, + lr_quadratic_warmup=False, + pretraining=False, + sample_packing=False, + sample_packing_sequentially=False, + multipack_real_batches=False, + eval_sample_packing=None, + sample_packing_efficiency=1.0, + sample_packing_bin_size=200, + sample_packing_group_size=100000, + max_seq_length=2048, + relora_steps=None, + relora_warmup_steps=None, + relora_anneal_steps=None, + relora_prune_ratio=0.9, + bench_split='eval', + bench_dataset='pharaouk/dharma-1/dharma_1_mini.json', + do_bench_eval=False, + do_causal_lm_eval=False, + max_bench_samples=None, + bench_source_max_len=2048, + dataloader_prefetch_factor=None, + cosine_min_lr_ratio=None, + cosine_constant_lr_ratio=None, + loraplus_lr_ratio=None, + loraplus_lr_embedding=1e-06, + embedding_lr_scale=None, + lr_groups=None, + embedding_lr=None, + qlora=False, + orpo_alpha=None, + lisa_n_layers=None, + lisa_step_interval=None, + lisa_layers_attribute=None, + curriculum_sampling=None, + alternate_lr_scheduler_type=None, + chat_template=None, + kd_ce_alpha=None, + kd_alpha=1.0, + kd_temperature=1.0, + kd_zscore_base_temp=None, + kd_top_k_before_softmax=None, + adam_beta3=None, + adam_epsilon2=None, + image_size=None, + image_resize_algorithm=None, +)

Reward config for Reward training

AxolotlTrainingArguments

core.training_args.AxolotlTrainingArguments(
-    self,
-    model_type=None,
-    lr_quadratic_warmup=False,
-    pretraining=False,
-    sample_packing=False,
-    sample_packing_sequentially=False,
-    multipack_real_batches=False,
-    eval_sample_packing=None,
-    sample_packing_efficiency=1.0,
-    sample_packing_bin_size=200,
-    sample_packing_group_size=100000,
-    max_seq_length=2048,
-    relora_steps=None,
-    relora_warmup_steps=None,
-    relora_anneal_steps=None,
-    relora_prune_ratio=0.9,
-    bench_split='eval',
-    bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',
-    do_bench_eval=False,
-    do_causal_lm_eval=False,
-    max_bench_samples=None,
-    bench_source_max_len=2048,
-    dataloader_prefetch_factor=None,
-    cosine_min_lr_ratio=None,
-    cosine_constant_lr_ratio=None,
-    loraplus_lr_ratio=None,
-    loraplus_lr_embedding=1e-06,
-    embedding_lr_scale=None,
-    lr_groups=None,
-    embedding_lr=None,
-    qlora=False,
-    orpo_alpha=None,
-    lisa_n_layers=None,
-    lisa_step_interval=None,
-    lisa_layers_attribute=None,
-    curriculum_sampling=None,
-    alternate_optimizer=None,
-    alternate_lr_scheduler_type=None,
-    chat_template=None,
-    kd_ce_alpha=None,
-    kd_alpha=1.0,
-    kd_temperature=1.0,
-    kd_zscore_base_temp=None,
-    kd_top_k_before_softmax=None,
-    adam_beta3=None,
-    adam_epsilon2=None,
-    image_size=None,
-    image_resize_algorithm=None,
-)
+ model_type=None, + lr_quadratic_warmup=False, + pretraining=False, + sample_packing=False, + sample_packing_sequentially=False, + multipack_real_batches=False, + eval_sample_packing=None, + sample_packing_efficiency=1.0, + sample_packing_bin_size=200, + sample_packing_group_size=100000, + max_seq_length=2048, + relora_steps=None, + relora_warmup_steps=None, + relora_anneal_steps=None, + relora_prune_ratio=0.9, + bench_split='eval', + bench_dataset='pharaouk/dharma-1/dharma_1_mini.json', + do_bench_eval=False, + do_causal_lm_eval=False, + max_bench_samples=None, + bench_source_max_len=2048, + dataloader_prefetch_factor=None, + cosine_min_lr_ratio=None, + cosine_constant_lr_ratio=None, + loraplus_lr_ratio=None, + loraplus_lr_embedding=1e-06, + embedding_lr_scale=None, + lr_groups=None, + embedding_lr=None, + qlora=False, + orpo_alpha=None, + lisa_n_layers=None, + lisa_step_interval=None, + lisa_layers_attribute=None, + curriculum_sampling=None, + alternate_lr_scheduler_type=None, + chat_template=None, + kd_ce_alpha=None, + kd_alpha=1.0, + kd_temperature=1.0, + kd_zscore_base_temp=None, + kd_top_k_before_softmax=None, + adam_beta3=None, + adam_epsilon2=None, + image_size=None, + image_resize_algorithm=None, +)

Training arguments for Causal trainer

This code is duplicated due to HF TrainingArguments not setting output_dir with a default value so it can’t be used as a mixin.

@@ -863,55 +851,53 @@ default value so it can’t be used as a mixin.

AxolotlTrainingMixins

core.training_args.AxolotlTrainingMixins(
-    self,
-    model_type=None,
-    lr_quadratic_warmup=False,
-    pretraining=False,
-    sample_packing=False,
-    sample_packing_sequentially=False,
-    multipack_real_batches=False,
-    eval_sample_packing=None,
-    sample_packing_efficiency=1.0,
-    sample_packing_bin_size=200,
-    sample_packing_group_size=100000,
-    max_seq_length=2048,
-    relora_steps=None,
-    relora_warmup_steps=None,
-    relora_anneal_steps=None,
-    relora_prune_ratio=0.9,
-    bench_split='eval',
-    bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',
-    do_bench_eval=False,
-    do_causal_lm_eval=False,
-    max_bench_samples=None,
-    bench_source_max_len=2048,
-    dataloader_prefetch_factor=None,
-    cosine_min_lr_ratio=None,
-    cosine_constant_lr_ratio=None,
-    loraplus_lr_ratio=None,
-    loraplus_lr_embedding=1e-06,
-    embedding_lr_scale=None,
-    lr_groups=None,
-    embedding_lr=None,
-    qlora=False,
-    orpo_alpha=None,
-    lisa_n_layers=None,
-    lisa_step_interval=None,
-    lisa_layers_attribute=None,
-    curriculum_sampling=None,
-    alternate_optimizer=None,
-    alternate_lr_scheduler_type=None,
-    chat_template=None,
-    kd_ce_alpha=None,
-    kd_alpha=1.0,
-    kd_temperature=1.0,
-    kd_zscore_base_temp=None,
-    kd_top_k_before_softmax=None,
-    adam_beta3=None,
-    adam_epsilon2=None,
-    image_size=None,
-    image_resize_algorithm=None,
-)
+ model_type=None, + lr_quadratic_warmup=False, + pretraining=False, + sample_packing=False, + sample_packing_sequentially=False, + multipack_real_batches=False, + eval_sample_packing=None, + sample_packing_efficiency=1.0, + sample_packing_bin_size=200, + sample_packing_group_size=100000, + max_seq_length=2048, + relora_steps=None, + relora_warmup_steps=None, + relora_anneal_steps=None, + relora_prune_ratio=0.9, + bench_split='eval', + bench_dataset='pharaouk/dharma-1/dharma_1_mini.json', + do_bench_eval=False, + do_causal_lm_eval=False, + max_bench_samples=None, + bench_source_max_len=2048, + dataloader_prefetch_factor=None, + cosine_min_lr_ratio=None, + cosine_constant_lr_ratio=None, + loraplus_lr_ratio=None, + loraplus_lr_embedding=1e-06, + embedding_lr_scale=None, + lr_groups=None, + embedding_lr=None, + qlora=False, + orpo_alpha=None, + lisa_n_layers=None, + lisa_step_interval=None, + lisa_layers_attribute=None, + curriculum_sampling=None, + alternate_lr_scheduler_type=None, + chat_template=None, + kd_ce_alpha=None, + kd_alpha=1.0, + kd_temperature=1.0, + kd_zscore_base_temp=None, + kd_top_k_before_softmax=None, + adam_beta3=None, + adam_epsilon2=None, + image_size=None, + image_resize_algorithm=None, +)

Mixin class for the Axolotl training args.

diff --git a/docs/api/datasets.html b/docs/api/datasets.html index 3103f55e9..ac043b1c3 100644 --- a/docs/api/datasets.html +++ b/docs/api/datasets.html @@ -510,7 +510,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

ConstantLengthDataset

-
datasets.ConstantLengthDataset(self, tokenizer, datasets, seq_length=2048)
+
datasets.ConstantLengthDataset(tokenizer, datasets, seq_length=2048)

Iterable dataset that returns constant length chunks of tokens from stream of text files. Args: tokenizer (Tokenizer): The processor used for processing the data. @@ -520,13 +520,12 @@ seq_length (int): Length of token sequences to return.

TokenizedPromptDataset

datasets.TokenizedPromptDataset(
-    self,
-    prompt_tokenizer,
-    dataset,
-    process_count=None,
-    keep_in_memory=False,
-    **kwargs,
-)
+ prompt_tokenizer, + dataset, + process_count=None, + keep_in_memory=False, + **kwargs, +)

Dataset that returns tokenized prompts from a stream of text files. Args: prompt_tokenizer (PromptTokenizingStrategy): The prompt tokenizing method for processing the data. diff --git a/docs/api/index.html b/docs/api/index.html index 4fb5e126c..e7364b520 100644 --- a/docs/api/index.html +++ b/docs/api/index.html @@ -492,8 +492,16 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); Common logging module for axolotl -core.trainer_builder -Builder for the training args and trainer +core.builders.base +Base class for trainer builder + + +core.builders.causal +Builder for causal trainers + + +core.builders.rl +Builder for RLHF trainers core.training_args diff --git a/docs/api/integrations.base.html b/docs/api/integrations.base.html index 50aab98ff..6d5596952 100644 --- a/docs/api/integrations.base.html +++ b/docs/api/integrations.base.html @@ -527,7 +527,7 @@ Plugins can be used to integrate third-party models, modify the training process

BasePlugin

-
integrations.base.BasePlugin(self)
+
integrations.base.BasePlugin()

Base class for all plugins. Defines the interface for plugin methods.

A plugin is a reusable, modular, and self-contained piece of code that extends the functionality of Axolotl. Plugins can be used to integrate third-party models, diff --git a/docs/api/integrations.kd.trainer.html b/docs/api/integrations.kd.trainer.html index 9173983bb..ca77e4197 100644 --- a/docs/api/integrations.kd.trainer.html +++ b/docs/api/integrations.kd.trainer.html @@ -506,13 +506,12 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

AxolotlKDTrainer

integrations.kd.trainer.AxolotlKDTrainer(
-    self,
-    *_args,
-    bench_data_collator=None,
-    eval_data_collator=None,
-    dataset_tags=None,
-    **kwargs,
-)
+ *_args, + bench_data_collator=None, + eval_data_collator=None, + dataset_tags=None, + **kwargs, +)

Custom trainer subclass for Knowledge Distillation (KD)

Methods

diff --git a/docs/api/loaders.model.html b/docs/api/loaders.model.html index 58a3f26a4..d8199a120 100644 --- a/docs/api/loaders.model.html +++ b/docs/api/loaders.model.html @@ -507,14 +507,13 @@ models.

ModelLoader

loaders.model.ModelLoader(
-    self,
-    cfg,
-    tokenizer,
-    *,
-    inference=False,
-    reference_model=False,
-    **kwargs,
-)
+ cfg, + tokenizer, + *, + inference=False, + reference_model=False, + **kwargs, +)

Manages model configuration, initialization and application of patches during model loading.

This class orchestrates the entire process of loading a model from configuration to diff --git a/docs/api/loaders.patch_manager.html b/docs/api/loaders.patch_manager.html index 588d71943..925bab2b7 100644 --- a/docs/api/loaders.patch_manager.html +++ b/docs/api/loaders.patch_manager.html @@ -506,7 +506,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

PatchManager

-
loaders.patch_manager.PatchManager(self, cfg, model_config, inference=False)
+
loaders.patch_manager.PatchManager(cfg, model_config, inference=False)

Manages the application of patches during the model loading process.

Attributes

diff --git a/docs/api/monkeypatch.attention.mllama.html b/docs/api/monkeypatch.attention.mllama.html index effe5cb1a..640d86f65 100644 --- a/docs/api/monkeypatch.attention.mllama.html +++ b/docs/api/monkeypatch.attention.mllama.html @@ -510,23 +510,18 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

MllamaTextCrossFlashAttention2

-
monkeypatch.attention.mllama.MllamaTextCrossFlashAttention2(
-    self,
-    *args,
-    **kwargs,
-)
+
monkeypatch.attention.mllama.MllamaTextCrossFlashAttention2(*args, **kwargs)

Mllama flash cross-attention module. This module inherits from MllamaTextCrossAttention and implements the forward pass using Flash Attention for improved performance.

MllamaTextSelfFlashAttention2

monkeypatch.attention.mllama.MllamaTextSelfFlashAttention2(
-    self,
-    config,
-    layer_idx,
-    *args,
-    **kwargs,
-)
+ config, + layer_idx, + *args, + **kwargs, +)

Mllama flash self-attention module. This module inherits from MllamaTextSelfAttention and implements the forward pass using Flash Attention for improved performance.

diff --git a/docs/api/monkeypatch.gradient_checkpointing.offload_disk.html b/docs/api/monkeypatch.gradient_checkpointing.offload_disk.html index caace4c7e..2e6a8d0bf 100644 --- a/docs/api/monkeypatch.gradient_checkpointing.offload_disk.html +++ b/docs/api/monkeypatch.gradient_checkpointing.offload_disk.html @@ -572,11 +572,10 @@ Advanced disk-based gradient checkpointer with prefetching.

DiskOffloadManager

monkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager(
-    self,
-    prefetch_size=3,
-    prefetch_to_gpu=True,
-    save_workers=4,
-)
+ prefetch_size=3, + prefetch_to_gpu=True, + save_workers=4, +)

Manages offloaded tensors and handles prefetching in a separate thread. Includes synchronization to prevent race conditions.

diff --git a/docs/api/monkeypatch.llama_attn_hijack_flash.html b/docs/api/monkeypatch.llama_attn_hijack_flash.html index 82af4e23b..d2c81f747 100644 --- a/docs/api/monkeypatch.llama_attn_hijack_flash.html +++ b/docs/api/monkeypatch.llama_attn_hijack_flash.html @@ -516,7 +516,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

FusedAttention

-
monkeypatch.llama_attn_hijack_flash.FusedAttention(self, config, q, k, v, o)
+
monkeypatch.llama_attn_hijack_flash.FusedAttention(config, q, k, v, o)

Fused QKV Attention layer for incrementally improved training efficiency

diff --git a/docs/api/monkeypatch.lora_kernels.html b/docs/api/monkeypatch.lora_kernels.html index c0fae7f0a..a7639ab2c 100644 --- a/docs/api/monkeypatch.lora_kernels.html +++ b/docs/api/monkeypatch.lora_kernels.html @@ -513,7 +513,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

FakeMLP

-
monkeypatch.lora_kernels.FakeMLP(self, gate_proj, up_proj, down_proj)
+
monkeypatch.lora_kernels.FakeMLP(gate_proj, up_proj, down_proj)

placeholder MLP for triton patching

diff --git a/docs/api/monkeypatch.relora.html b/docs/api/monkeypatch.relora.html index 6d1c938a6..c9af581e6 100644 --- a/docs/api/monkeypatch.relora.html +++ b/docs/api/monkeypatch.relora.html @@ -510,20 +510,19 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

ReLoRACallback

-
monkeypatch.relora.ReLoRACallback(self, cfg)
+
monkeypatch.relora.ReLoRACallback(cfg)

Callback to merge LoRA weights into the base model and save full-weight checkpoints

ReLoRAScheduler

monkeypatch.relora.ReLoRAScheduler(
-    self,
-    optimizer,
-    inner_schedule,
-    relora_steps,
-    warmup_steps,
-    anneal_steps=1,
-    min_lr_scale=0.001,
-)
+ optimizer, + inner_schedule, + relora_steps, + warmup_steps, + anneal_steps=1, + min_lr_scale=0.001, +)

Wraps another scheduler to apply per-lora-restart learning rate warmups.

diff --git a/docs/api/prompt_strategies.alpaca_chat.html b/docs/api/prompt_strategies.alpaca_chat.html index 441e544f3..4ad672262 100644 --- a/docs/api/prompt_strategies.alpaca_chat.html +++ b/docs/api/prompt_strategies.alpaca_chat.html @@ -525,42 +525,39 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

AlpacaChatPrompter

-
prompt_strategies.alpaca_chat.AlpacaChatPrompter(self)
+
prompt_strategies.alpaca_chat.AlpacaChatPrompter()

Alpaca Chat Prompter extending the system prompt to for chat-instruct answers

AlpacaConcisePrompter

prompt_strategies.alpaca_chat.AlpacaConcisePrompter(
-    self,
-    prompt_style=PromptStyle.INSTRUCT.value,
-)
+ prompt_style=PromptStyle.INSTRUCT.value, +)

Alpaca Prompter extending the system prompt to ask for concise chat-instruct answers

AlpacaQAPromptTokenizingStrategy

prompt_strategies.alpaca_chat.AlpacaQAPromptTokenizingStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs=False,
-    sequence_len=2048,
-)
+ prompter, + tokenizer, + train_on_inputs=False, + sequence_len=2048, +)

Tokenizing strategy for AlpacaQA

CamelAIPromptTokenizingStrategy

prompt_strategies.alpaca_chat.CamelAIPromptTokenizingStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs=False,
-    sequence_len=2048,
-)
+ prompter, + tokenizer, + train_on_inputs=False, + sequence_len=2048, +)

Tokenizing strategy for CamelAI datasets

NoSystemPrompter

-
prompt_strategies.alpaca_chat.NoSystemPrompter(self)
+
prompt_strategies.alpaca_chat.NoSystemPrompter()

Null Prompter with no system prompts

diff --git a/docs/api/prompt_strategies.alpaca_w_system.html b/docs/api/prompt_strategies.alpaca_w_system.html index 741247137..0e116d93b 100644 --- a/docs/api/prompt_strategies.alpaca_w_system.html +++ b/docs/api/prompt_strategies.alpaca_w_system.html @@ -521,39 +521,35 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

InstructionWSystemPromptTokenizingStrategy

prompt_strategies.alpaca_w_system.InstructionWSystemPromptTokenizingStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs=False,
-    sequence_len=2048,
-)
+ prompter, + tokenizer, + train_on_inputs=False, + sequence_len=2048, +)

Tokenizing strategy for instruction-based prompts.

OpenOrcaPromptTokenizingStrategy

prompt_strategies.alpaca_w_system.OpenOrcaPromptTokenizingStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs=False,
-    sequence_len=2048,
-)
+ prompter, + tokenizer, + train_on_inputs=False, + sequence_len=2048, +)

Tokenizing strategy for OpenOrca datasets

OpenOrcaSystemDataPrompter

prompt_strategies.alpaca_w_system.OpenOrcaSystemDataPrompter(
-    self,
-    prompt_style=PromptStyle.INSTRUCT.value,
-)
+ prompt_style=PromptStyle.INSTRUCT.value, +)

Alpaca Style Prompter that uses system prompts from the dataset, with OpenOrca prompts

SystemDataPrompter

prompt_strategies.alpaca_w_system.SystemDataPrompter(
-    self,
-    prompt_style=PromptStyle.INSTRUCT.value,
-)
+ prompt_style=PromptStyle.INSTRUCT.value, +)

Alpaca Style Prompter that uses system prompts from the dataset

diff --git a/docs/api/prompt_strategies.chat_template.html b/docs/api/prompt_strategies.chat_template.html index 06eee96ac..63a1ed4bf 100644 --- a/docs/api/prompt_strategies.chat_template.html +++ b/docs/api/prompt_strategies.chat_template.html @@ -516,35 +516,33 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

ChatTemplatePrompter

prompt_strategies.chat_template.ChatTemplatePrompter(
-    self,
-    tokenizer,
-    chat_template,
-    processor=None,
-    max_length=2048,
-    message_property_mappings=None,
-    message_field_training=None,
-    message_field_training_detail=None,
-    field_messages='messages',
-    field_system='system',
-    roles=None,
-    drop_system_message=False,
-)
+ tokenizer, + chat_template, + processor=None, + max_length=2048, + message_property_mappings=None, + message_field_training=None, + message_field_training_detail=None, + field_messages='messages', + field_system='system', + roles=None, + drop_system_message=False, +)

Prompter for HF chat templates

ChatTemplateStrategy

prompt_strategies.chat_template.ChatTemplateStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs,
-    sequence_len,
-    roles_to_train=None,
-    train_on_eos=None,
-    train_on_eot=None,
-    eot_tokens=None,
-    split_thinking=False,
-)
+ prompter, + tokenizer, + train_on_inputs, + sequence_len, + roles_to_train=None, + train_on_eos=None, + train_on_eot=None, + eot_tokens=None, + split_thinking=False, +)

Tokenizing strategy for instruction-based prompts.

Methods

diff --git a/docs/api/prompt_strategies.completion.html b/docs/api/prompt_strategies.completion.html index 994e51f28..1c81014e5 100644 --- a/docs/api/prompt_strategies.completion.html +++ b/docs/api/prompt_strategies.completion.html @@ -511,11 +511,10 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

CompletionPromptTokenizingStrategy

prompt_strategies.completion.CompletionPromptTokenizingStrategy(
-    self,
-    *args,
-    max_length=None,
-    **kwargs,
-)
+ *args, + max_length=None, + **kwargs, +)

Tokenizing strategy for Completion prompts.

diff --git a/docs/api/prompt_strategies.input_output.html b/docs/api/prompt_strategies.input_output.html index 5de510f53..c513f009c 100644 --- a/docs/api/prompt_strategies.input_output.html +++ b/docs/api/prompt_strategies.input_output.html @@ -516,11 +516,10 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

RawInputOutputStrategy

prompt_strategies.input_output.RawInputOutputStrategy(
-    self,
-    *args,
-    eos_token=None,
-    **kwargs,
-)
+ *args, + eos_token=None, + **kwargs, +)

Prompt Strategy class for input/output pairs

diff --git a/docs/api/prompt_strategies.llama2_chat.html b/docs/api/prompt_strategies.llama2_chat.html index 1b516ec6d..4e3115fd3 100644 --- a/docs/api/prompt_strategies.llama2_chat.html +++ b/docs/api/prompt_strategies.llama2_chat.html @@ -528,24 +528,19 @@ For a custom system message, the first “from” can be “system” (followed

LLama2ChatTokenizingStrategy

-
prompt_strategies.llama2_chat.LLama2ChatTokenizingStrategy(
-    self,
-    *args,
-    **kwargs,
-)
+
prompt_strategies.llama2_chat.LLama2ChatTokenizingStrategy(*args, **kwargs)

Tokenizing strategy for Llama2 prompts. adapted from https://github.com/lm-sys/FastChat/blob/main/fastchat/train/train.py

Llama2ChatConversation

prompt_strategies.llama2_chat.Llama2ChatConversation(
-    self,
-    name='llama2',
-    system="[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\n<</SYS>>\n\n",
-    roles=('[INST]', '[/INST]'),
-    messages=list(),
-    offset=0,
-)
+ name='llama2', + system="[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\n<</SYS>>\n\n", + roles=('[INST]', '[/INST]'), + messages=list(), + offset=0, +)

A class that manages prompt templates and keeps all conversation history. copied from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py

diff --git a/docs/api/prompt_strategies.messages.chat.html b/docs/api/prompt_strategies.messages.chat.html index d8d24b6e2..7a175ef8b 100644 --- a/docs/api/prompt_strategies.messages.chat.html +++ b/docs/api/prompt_strategies.messages.chat.html @@ -506,12 +506,11 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

ChatMessageDatasetWrappingStrategy

prompt_strategies.messages.chat.ChatMessageDatasetWrappingStrategy(
-    self,
-    processor,
-    message_transform=None,
-    formatter=None,
-    **kwargs,
-)
+ processor, + message_transform=None, + formatter=None, + **kwargs, +)

Chat dataset wrapping strategy for new internal messages representations

diff --git a/docs/api/prompt_strategies.metharme.html b/docs/api/prompt_strategies.metharme.html index 60d1d111d..e3bc9830b 100644 --- a/docs/api/prompt_strategies.metharme.html +++ b/docs/api/prompt_strategies.metharme.html @@ -511,17 +511,16 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

MetharmePromptTokenizingStrategy

prompt_strategies.metharme.MetharmePromptTokenizingStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs=False,
-    sequence_len=2048,
-)
+ prompter, + tokenizer, + train_on_inputs=False, + sequence_len=2048, +)

Tokenizing strategy for the Metharme models

MetharmePrompter

-
prompt_strategies.metharme.MetharmePrompter(self, *args, **kwargs)
+
prompt_strategies.metharme.MetharmePrompter(*args, **kwargs)

Prompter for the Metharme models.

diff --git a/docs/api/prompt_strategies.orcamini.html b/docs/api/prompt_strategies.orcamini.html index f79c3e8a6..654cb019b 100644 --- a/docs/api/prompt_strategies.orcamini.html +++ b/docs/api/prompt_strategies.orcamini.html @@ -511,9 +511,8 @@ this one specifies the system prompt with “### System:”.

OrcaMiniPrompter

prompt_strategies.orcamini.OrcaMiniPrompter(
-    self,
-    prompt_style=PromptStyle.INSTRUCT.value,
-)
+ prompt_style=PromptStyle.INSTRUCT.value, +)

Adjusted Prompter for Orca Mini (v2) datasets

diff --git a/docs/api/prompt_strategies.orpo.chat_template.html b/docs/api/prompt_strategies.orpo.chat_template.html index 2a27b4401..5944c34f6 100644 --- a/docs/api/prompt_strategies.orpo.chat_template.html +++ b/docs/api/prompt_strategies.orpo.chat_template.html @@ -590,21 +590,16 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

ORPOPrompter

-
prompt_strategies.orpo.chat_template.ORPOPrompter(
-    self,
-    chat_template,
-    tokenizer,
-)
+
prompt_strategies.orpo.chat_template.ORPOPrompter(chat_template, tokenizer)

Single Turn prompter for ORPO

ORPOTokenizingStrategy

prompt_strategies.orpo.chat_template.ORPOTokenizingStrategy(
-    self,
-    *args,
-    dataset_parser=None,
-    **kwargs,
-)
+ *args, + dataset_parser=None, + **kwargs, +)

rejected_input_ids input_ids rejected_attention_mask diff --git a/docs/api/prompt_strategies.pygmalion.html b/docs/api/prompt_strategies.pygmalion.html index ec56b3e15..278e4490a 100644 --- a/docs/api/prompt_strategies.pygmalion.html +++ b/docs/api/prompt_strategies.pygmalion.html @@ -511,17 +511,16 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

PygmalionPromptTokenizingStrategy

prompt_strategies.pygmalion.PygmalionPromptTokenizingStrategy(
-    self,
-    prompter,
-    tokenizer,
-    *args,
-    **kwargs,
-)
+ prompter, + tokenizer, + *args, + **kwargs, +)

Tokenizing strategy for Pygmalion.

PygmalionPrompter

-
prompt_strategies.pygmalion.PygmalionPrompter(self, *args, **kwargs)
+
prompt_strategies.pygmalion.PygmalionPrompter(*args, **kwargs)

Prompter for Pygmalion.

diff --git a/docs/api/prompt_strategies.stepwise_supervised.html b/docs/api/prompt_strategies.stepwise_supervised.html index c99002b38..d7383313f 100644 --- a/docs/api/prompt_strategies.stepwise_supervised.html +++ b/docs/api/prompt_strategies.stepwise_supervised.html @@ -507,13 +507,12 @@ and (optionally) per-step, or per-prompt-trace labels for reward modelling.

StepwiseSupervisedPromptTokenizingStrategy

prompt_strategies.stepwise_supervised.StepwiseSupervisedPromptTokenizingStrategy(
-    self,
-    tokenizer,
-    sequence_len=2048,
-    step_separator='\n',
-    max_completion_length=None,
-    train_on_last_step_only=False,
-)
+ tokenizer, + sequence_len=2048, + step_separator='\n', + max_completion_length=None, + train_on_last_step_only=False, +)

Tokenizing strategy for supervised stepwise datasets, typically used for COT-reasoning. These datasets should include the following columns: - prompt: the prompt text diff --git a/docs/api/prompt_strategies.user_defined.html b/docs/api/prompt_strategies.user_defined.html index 4a1c378bd..6ff70baec 100644 --- a/docs/api/prompt_strategies.user_defined.html +++ b/docs/api/prompt_strategies.user_defined.html @@ -511,27 +511,25 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

UserDefinedDatasetConfig

prompt_strategies.user_defined.UserDefinedDatasetConfig(
-    self,
-    system_prompt='',
-    field_system='system',
-    field_instruction='instruction',
-    field_input='input',
-    field_output='output',
-    format='{instruction} {input} ',
-    no_input_format='{instruction} ',
-    system_format='{system}',
-)
+ system_prompt='', + field_system='system', + field_instruction='instruction', + field_input='input', + field_output='output', + format='{instruction} {input} ', + no_input_format='{instruction} ', + system_format='{system}', +)

dataclass configuration representing a userdefined dataset type

UserDefinedPromptTokenizationStrategy

prompt_strategies.user_defined.UserDefinedPromptTokenizationStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs=False,
-    sequence_len=2048,
-)
+ prompter, + tokenizer, + train_on_inputs=False, + sequence_len=2048, +)

Prompt Tokenization Strategy for user defined prompts

diff --git a/docs/api/prompt_tokenizers.html b/docs/api/prompt_tokenizers.html index 2ad4faecc..f8fd62fe3 100644 --- a/docs/api/prompt_tokenizers.html +++ b/docs/api/prompt_tokenizers.html @@ -571,34 +571,31 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

AlpacaMultipleChoicePromptTokenizingStrategy

prompt_tokenizers.AlpacaMultipleChoicePromptTokenizingStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs=False,
-    sequence_len=2048,
-)
+ prompter, + tokenizer, + train_on_inputs=False, + sequence_len=2048, +)

Tokenizing strategy for Alpaca Multiple Choice prompts.

AlpacaPromptTokenizingStrategy

prompt_tokenizers.AlpacaPromptTokenizingStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs=False,
-    sequence_len=2048,
-)
+ prompter, + tokenizer, + train_on_inputs=False, + sequence_len=2048, +)

Tokenizing strategy for Alpaca prompts.

AlpacaReflectionPTStrategy

prompt_tokenizers.AlpacaReflectionPTStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs=False,
-    sequence_len=2048,
-)
+ prompter, + tokenizer, + train_on_inputs=False, + sequence_len=2048, +)

Tokenizing strategy for Alpaca Reflection prompts.

@@ -609,23 +606,21 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

GPTeacherPromptTokenizingStrategy

prompt_tokenizers.GPTeacherPromptTokenizingStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs=False,
-    sequence_len=2048,
-)
+ prompter, + tokenizer, + train_on_inputs=False, + sequence_len=2048, +)

Tokenizing strategy for GPTeacher prompts.

InstructionPromptTokenizingStrategy

prompt_tokenizers.InstructionPromptTokenizingStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs=False,
-    sequence_len=2048,
-)
+ prompter, + tokenizer, + train_on_inputs=False, + sequence_len=2048, +)

Tokenizing strategy for instruction-based prompts.

@@ -636,67 +631,61 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

JeopardyPromptTokenizingStrategy

prompt_tokenizers.JeopardyPromptTokenizingStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs=False,
-    sequence_len=2048,
-)
+ prompter, + tokenizer, + train_on_inputs=False, + sequence_len=2048, +)

Tokenizing strategy for Jeopardy prompts.

NomicGPT4AllPromptTokenizingStrategy

prompt_tokenizers.NomicGPT4AllPromptTokenizingStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs=False,
-    sequence_len=2048,
-)
+ prompter, + tokenizer, + train_on_inputs=False, + sequence_len=2048, +)

Tokenizing strategy for NomicGPT4All prompts.

OpenAssistantPromptTokenizingStrategy

prompt_tokenizers.OpenAssistantPromptTokenizingStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs=False,
-    sequence_len=2048,
-)
+ prompter, + tokenizer, + train_on_inputs=False, + sequence_len=2048, +)

Tokenizing strategy for OpenAssistant prompts.

PromptTokenizingStrategy

prompt_tokenizers.PromptTokenizingStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs=False,
-    sequence_len=2048,
-)
+ prompter, + tokenizer, + train_on_inputs=False, + sequence_len=2048, +)

Abstract class for tokenizing strategies

ReflectionPromptTokenizingStrategy

prompt_tokenizers.ReflectionPromptTokenizingStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs=False,
-    sequence_len=2048,
-)
+ prompter, + tokenizer, + train_on_inputs=False, + sequence_len=2048, +)

Tokenizing strategy for Reflection prompts.

SummarizeTLDRPromptTokenizingStrategy

prompt_tokenizers.SummarizeTLDRPromptTokenizingStrategy(
-    self,
-    prompter,
-    tokenizer,
-    train_on_inputs=False,
-    sequence_len=2048,
-)
+ prompter, + tokenizer, + train_on_inputs=False, + sequence_len=2048, +)

Tokenizing strategy for SummarizeTLDR prompts.

diff --git a/docs/api/utils.callbacks.comet_.html b/docs/api/utils.callbacks.comet_.html index d02644c5e..f9a69f702 100644 --- a/docs/api/utils.callbacks.comet_.html +++ b/docs/api/utils.callbacks.comet_.html @@ -505,10 +505,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

SaveAxolotlConfigtoCometCallback

-
utils.callbacks.comet_.SaveAxolotlConfigtoCometCallback(
-    self,
-    axolotl_config_path,
-)
+
utils.callbacks.comet_.SaveAxolotlConfigtoCometCallback(axolotl_config_path)

Callback to save axolotl config to comet

diff --git a/docs/api/utils.callbacks.mlflow_.html b/docs/api/utils.callbacks.mlflow_.html index 2c2266b7f..8617d93a0 100644 --- a/docs/api/utils.callbacks.mlflow_.html +++ b/docs/api/utils.callbacks.mlflow_.html @@ -505,10 +505,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

SaveAxolotlConfigtoMlflowCallback

-
utils.callbacks.mlflow_.SaveAxolotlConfigtoMlflowCallback(
-    self,
-    axolotl_config_path,
-)
+
utils.callbacks.mlflow_.SaveAxolotlConfigtoMlflowCallback(axolotl_config_path)

Callback to save axolotl config to mlflow

diff --git a/docs/api/utils.callbacks.perplexity.html b/docs/api/utils.callbacks.perplexity.html index d2264583c..107a34d5e 100644 --- a/docs/api/utils.callbacks.perplexity.html +++ b/docs/api/utils.callbacks.perplexity.html @@ -505,7 +505,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

Perplexity

-
utils.callbacks.perplexity.Perplexity(self, tokenizer, max_seq_len, stride=512)
+
utils.callbacks.perplexity.Perplexity(tokenizer, max_seq_len, stride=512)

Calculate perplexity as defined in https://huggingface.co/docs/transformers/en/perplexity. This is a custom variant that doesn’t re-tokenize the input or re-load the model.

diff --git a/docs/api/utils.callbacks.profiler.html b/docs/api/utils.callbacks.profiler.html index 9458d72aa..57fb59806 100644 --- a/docs/api/utils.callbacks.profiler.html +++ b/docs/api/utils.callbacks.profiler.html @@ -505,7 +505,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

PytorchProfilerCallback

-
utils.callbacks.profiler.PytorchProfilerCallback(self, steps_to_profile=5)
+
utils.callbacks.profiler.PytorchProfilerCallback(steps_to_profile=5)

PyTorch Profiler callback to create snapshots of GPU memory usage at specified steps.

diff --git a/docs/api/utils.callbacks.qat.html b/docs/api/utils.callbacks.qat.html index dc3bcb704..c6e59af45 100644 --- a/docs/api/utils.callbacks.qat.html +++ b/docs/api/utils.callbacks.qat.html @@ -509,7 +509,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

QATCallback

-
utils.callbacks.qat.QATCallback(self, cfg)
+
utils.callbacks.qat.QATCallback(cfg)

Callback to toggle fake quantization for the model.

diff --git a/docs/api/utils.collators.batching.html b/docs/api/utils.collators.batching.html index 8c8455297..4772693b7 100644 --- a/docs/api/utils.collators.batching.html +++ b/docs/api/utils.collators.batching.html @@ -521,31 +521,29 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

BatchSamplerDataCollatorForSeq2Seq

utils.collators.batching.BatchSamplerDataCollatorForSeq2Seq(
-    self,
-    tokenizer,
-    model=None,
-    padding=True,
-    max_length=None,
-    pad_to_multiple_of=None,
-    label_pad_token_id=-100,
-    position_pad_token_id=0,
-    return_tensors='pt',
-)
+ tokenizer, + model=None, + padding=True, + max_length=None, + pad_to_multiple_of=None, + label_pad_token_id=-100, + position_pad_token_id=0, + return_tensors='pt', +)

Collator for multipack specific to the using the BatchSampler

DataCollatorForSeq2Seq

utils.collators.batching.DataCollatorForSeq2Seq(
-    self,
-    tokenizer,
-    model=None,
-    padding=True,
-    max_length=None,
-    pad_to_multiple_of=None,
-    label_pad_token_id=-100,
-    position_pad_token_id=0,
-    return_tensors='pt',
-)
+ tokenizer, + model=None, + padding=True, + max_length=None, + pad_to_multiple_of=None, + label_pad_token_id=-100, + position_pad_token_id=0, + return_tensors='pt', +)

Data collator that will dynamically pad the inputs received, as well as the labels and position_ids

Parameters

@@ -614,26 +612,24 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

PretrainingBatchSamplerDataCollatorForSeq2Seq

utils.collators.batching.PretrainingBatchSamplerDataCollatorForSeq2Seq(
-    self,
-    *args,
-    multipack_attn=True,
-    **kwargs,
-)
+ *args, + multipack_attn=True, + **kwargs, +)

Collator for multipack specific to the using the BatchSampler

V2BatchSamplerDataCollatorForSeq2Seq

utils.collators.batching.V2BatchSamplerDataCollatorForSeq2Seq(
-    self,
-    tokenizer,
-    model=None,
-    padding=True,
-    max_length=None,
-    pad_to_multiple_of=None,
-    label_pad_token_id=-100,
-    position_pad_token_id=0,
-    return_tensors='pt',
-)
+ tokenizer, + model=None, + padding=True, + max_length=None, + pad_to_multiple_of=None, + label_pad_token_id=-100, + position_pad_token_id=0, + return_tensors='pt', +)

Collator for multipack specific to the using the BatchSampler

diff --git a/docs/api/utils.collators.mamba.html b/docs/api/utils.collators.mamba.html index 7f8cdd430..8fe7c20de 100644 --- a/docs/api/utils.collators.mamba.html +++ b/docs/api/utils.collators.mamba.html @@ -505,7 +505,7 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

MambaDataCollator

-
utils.collators.mamba.MambaDataCollator(self, tokenizer)
+
utils.collators.mamba.MambaDataCollator(tokenizer)

Collator for State Space Models (Mamba)

diff --git a/docs/api/utils.collators.mm_chat.html b/docs/api/utils.collators.mm_chat.html index 96fed97c7..69ece1fc9 100644 --- a/docs/api/utils.collators.mm_chat.html +++ b/docs/api/utils.collators.mm_chat.html @@ -506,14 +506,13 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

MultiModalChatDataCollator

utils.collators.mm_chat.MultiModalChatDataCollator(
-    self,
-    tokenizer,
-    processing_strategy,
-    packing=False,
-    return_tensors='pt',
-    padding=True,
-    pad_to_multiple_of=None,
-)
+ tokenizer, + processing_strategy, + packing=False, + return_tensors='pt', + padding=True, + pad_to_multiple_of=None, +)

Collator for multi-modal chat messages

diff --git a/docs/api/utils.ctx_managers.sequence_parallel.html b/docs/api/utils.ctx_managers.sequence_parallel.html index 478f010e0..b0025efdb 100644 --- a/docs/api/utils.ctx_managers.sequence_parallel.html +++ b/docs/api/utils.ctx_managers.sequence_parallel.html @@ -680,13 +680,12 @@ from the full gradient tensor.

SequenceParallelContextManager

utils.ctx_managers.sequence_parallel.SequenceParallelContextManager(
-    self,
-    models,
-    sequence_parallel_degree,
-    gradient_accumulation_steps,
-    ring_attn_func,
-    heads_k_stride,
-)
+ models, + sequence_parallel_degree, + gradient_accumulation_steps, + ring_attn_func, + heads_k_stride, +)

Context manager for sequence parallelism operations.

This class provides a context that will automatically apply sequence parallelism during model forward passes using a pre-forward hook, and gather outputs from diff --git a/docs/api/utils.freeze.html b/docs/api/utils.freeze.html index 5b15c7756..d1c1bb14f 100644 --- a/docs/api/utils.freeze.html +++ b/docs/api/utils.freeze.html @@ -538,7 +538,7 @@ window.Quarto = {

LayerNamePattern

-
utils.freeze.LayerNamePattern(self, pattern)
+
utils.freeze.LayerNamePattern(pattern)

Represents a regex pattern for layer names, potentially including a parameter index range.

Methods

diff --git a/docs/api/utils.samplers.multipack.html b/docs/api/utils.samplers.multipack.html index 4b7acd69b..83307f3ab 100644 --- a/docs/api/utils.samplers.multipack.html +++ b/docs/api/utils.samplers.multipack.html @@ -514,21 +514,20 @@ into fixed-capacity batches to optimize memory usage and training throughput.

MultipackBatchSampler

utils.samplers.multipack.MultipackBatchSampler(
-    self,
-    sampler,
-    batch_size,
-    batch_max_len,
-    lengths,
-    packing_efficiency_estimate=1.0,
-    drop_last=False,
-    num_count_samples=16,
-    sequential=False,
-    group_size=100000,
-    bin_size=200,
-    num_processes=None,
-    safe_mode=True,
-    **kwargs,
-)
+ sampler, + batch_size, + batch_max_len, + lengths, + packing_efficiency_estimate=1.0, + drop_last=False, + num_count_samples=16, + sequential=False, + group_size=100000, + bin_size=200, + num_processes=None, + safe_mode=True, + **kwargs, +)

Batch sampler class for efficient packing of variable-length sequences

This sampler packs sequences into fixed-capacity bins (batches) to maximize GPU memory utilization and training throughput by reducing padding.

diff --git a/docs/api/utils.schedulers.html b/docs/api/utils.schedulers.html index dab9ab7f8..5ac8d5fae 100644 --- a/docs/api/utils.schedulers.html +++ b/docs/api/utils.schedulers.html @@ -517,26 +517,24 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true});

InterpolatingLogScheduler

utils.schedulers.InterpolatingLogScheduler(
-    self,
-    optimizer,
-    num_steps,
-    min_lr,
-    max_lr,
-    last_epoch=-1,
-)
+ optimizer, + num_steps, + min_lr, + max_lr, + last_epoch=-1, +)

A scheduler that interpolates learning rates in a logarithmic fashion

RexLR

utils.schedulers.RexLR(
-    self,
-    optimizer,
-    max_lr,
-    min_lr,
-    total_steps=0,
-    num_warmup_steps=0,
-    last_step=0,
-)
+ optimizer, + max_lr, + min_lr, + total_steps=0, + num_warmup_steps=0, + last_step=0, +)

Reflected Exponential (REX) learning rate scheduler.

  • Original implementation: https://github.com/IvanVassi/REX_LR
  • diff --git a/docs/config.html b/docs/config.html index 9b22919b7..0cc78a4fb 100644 --- a/docs/config.html +++ b/docs/config.html @@ -1003,283 +1003,284 @@ gtag('config', 'G-9KYCVJBNMQ', { 'anonymize_ip': true}); # setting to `auto` will enable torch compile when torch>=2.5.1 torch_compile: # Optional[Union[Literal["auto"], bool]] torch_compile_backend: # Optional[str] - -# Training hyperparameters - -# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps. -gradient_accumulation_steps: 1 -# The number of samples to include in each batch. This is the number of samples sent to each GPU. -# Batch size per gpu = micro_batch_size * gradient_accumulation_steps -micro_batch_size: 2 -eval_batch_size: -num_epochs: 4 -warmup_steps: 100 # cannot use with warmup_ratio -warmup_ratio: 0.05 # cannot use with warmup_steps -learning_rate: 0.00003 -lr_quadratic_warmup: -logging_steps: -eval_steps: # Leave empty to eval at each epoch, integer for every N steps. float for fraction of total steps -evals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps -eval_strategy: # Set to `"no"` to skip evaluation, `"epoch"` at end of each epoch, leave empty to infer from `eval_steps`. -save_strategy: # Set to `"no"` to skip checkpoint saves, `"epoch"` at end of each epoch, `"best"` when better result is achieved, leave empty to infer from `save_steps`. -save_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps -saves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps -save_total_limit: # Checkpoints saved at a time -save_only_model: # Save only the model weights, skipping the optimizer. Using this means you can't resume from checkpoints. -# Maximum number of iterations to train for. It precedes num_epochs which means that -# if both are set, num_epochs will not be guaranteed. 
-# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps -max_steps: - -# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time. -include_tokens_per_second: # Optional[bool] - -# whether to find batch size that fits in memory. Passed to underlying transformers Trainer -auto_find_batch_size: # Optional[bool] - -eval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0 -eval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128 -do_causal_lm_eval: # Whether to run causal language model evaluation for metrics in `eval_causal_lm_metrics`. -eval_causal_lm_metrics: # HF evaluate metrics used during evaluation. Default is ["sacrebleu", "comet", "ter", "chrf", "perplexity"] - -profiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir. - # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information - # snapshots can be visualized @ https://pytorch.org/memory_viz - -loss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training) -loss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3) - -# Save model as safetensors (require safetensors package) -save_safetensors: - -# Whether to mask out or include the human's prompt from the training labels -train_on_inputs: false -# Group similarly sized data to minimize padding. -# May be slower to start, as it must download and sort the entire dataset. -# Note that training loss may have an oscillating pattern with this enabled. -group_by_length: false - -# Whether to use gradient checkpointing. Available options are: true, false, "offload", "offload_disk". 
-# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing -gradient_checkpointing: false -# additional kwargs to pass to the trainer for gradient checkpointing -# gradient_checkpointing_kwargs: -# use_reentrant: true - -# Stop training after this many evaluation losses have increased in a row -# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback -early_stopping_patience: 3 - -# Specify a scheduler and kwargs to use with the optimizer -# Valid values are driven by the Transformers SchedulerType class, see: -# https://github.com/huggingface/transformers/blob/5f4ecf2d9f867a1255131d2461d75793c0cf1db2/src/transformers/trainer_utils.py#L420 -# Valid values include -# - 'linear' -# - 'cosine' (default) -# - 'cosine_with_restarts' -# - 'polynomial' -# - 'constant' -# - 'constant_with_warmup' -# - 'inverse_sqrt' -# - 'reduce_lr_on_plateau' -# - 'cosine_with_min_lr' -# - 'warmup_stable_decay' - -# Additional schedulers include: -# - 'one_cycle' -# - 'rex' -lr_scheduler: -lr_scheduler_kwargs: -cosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr -cosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf) - -# For one_cycle optim -lr_div_factor: # Learning rate div factor - -# Specify optimizer -# Valid values are driven by the Transformers OptimizerNames class, see: -# https://github.com/huggingface/transformers/blob/cbf924b76c03828101a34069a96d209314114fd5/src/transformers/training_args.py#L144-L189 -# -# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of -# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. 
When in doubt, it is recommended to start with the optimizer used -# in the examples/ for your model and fine-tuning use case. -# -# Valid values for 'optimizer' include: -# - adamw_torch -# - adamw_torch_fused (default) -# - adamw_torch_xla -# - adamw_torch_npu_fused -# - adamw_apex_fused -# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1) -# - adafactor -# - adamw_anyprecision -# - adamw_torch_4bit -# - ademamix -# - sgd -# - adagrad -# - adamw_bnb_8bit -# - adamw_8bit # alias for adamw_bnb_8bit -# - ademamix_8bit -# - lion_8bit -# - lion_32bit -# - paged_adamw_32bit -# - paged_adamw_8bit -# - paged_ademamix_32bit -# - paged_ademamix_8bit -# - paged_lion_32bit -# - paged_lion_8bit -# - rmsprop -# - rmsprop_bnb -# - rmsprop_bnb_8bit -# - rmsprop_bnb_32bit -# - galore_adamw -# - galore_adamw_8bit -# - galore_adafactor -# - galore_adamw_layerwise -# - galore_adamw_8bit_layerwise -# - galore_adafactor_layerwise -# - lomo -# - adalomo -# - grokadamw -# - schedule_free_adamw -# - schedule_free_sgd -# - apollo_adamw -# - apollo_adamw_layerwise -# -# Additional custom optimizers include: -# - optimi_adamw -# - ao_adamw_8bit -# - ao_adamw_fp8 -# - came_pytorch -optimizer: -# Dictionary of arguments to pass to the optimizer -optim_args: -# For Galore Optimizers the following optim_args are available -# rank: # type: int -# update_proj_gap # type: int -# scale # type: float -# proj_type: # type: str, default = std - -# The target modules to optimize, i.e. 
the module names that you would like to train, right now this is used only for GaLore algorithm -optim_target_modules: -# - self_attn # for llama -# - mlp - -# Specify weight decay -weight_decay: -# adamw hyperparams -adam_beta1: -adam_beta2: -adam_beta3: # only used for CAME Optimizer -adam_epsilon: -adam_epsilon2: # only used for CAME Optimizer -# Gradient clipping max norm -max_grad_norm: - -# Augmentation techniques -# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings -# currently only supported on Llama and Mistral -neftune_noise_alpha: - -# Optional[bool]. Whether to bettertransformers -flash_optimum: - -# Note: Only one of the following attention patches can be used at a time. -# For example, if you set `xformers_attention` to `true`, do not set `flash_attention` to `true`. - -# Optional[bool]. Whether to use xformers attention patch https://github.com/facebookresearch/xformers: -xformers_attention: -# Optional[bool]. Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention: -flash_attention: -flash_attn_cross_entropy: # Optional[bool]. Whether to use flash-attention cross entropy implementation - advanced use only -flash_attn_rms_norm: # Optional[bool]. Whether to use flash-attention rms norm implementation - advanced use only -flash_attn_fuse_qkv: # Optional[bool]. Whether to fuse QKV into a single operation -flash_attn_fuse_mlp: # Optional[bool]. Whether to fuse part of the MLP into a single operation -# Optional[bool]. Whether to use scaled-dot-product attention -# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html -sdp_attention: -# Optional[bool]. Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf -s2_attention: - -# Optional[bool]. Whether to use low_cpu_mem_usage -low_cpu_mem_usage: -# Optional[str]. Resume from a specific checkpoint dir -resume_from_checkpoint: -# Optional[bool]. 
If resume_from_checkpoint isn't set and you simply want it to start where it left off. -# Be careful with this being turned on between different models. -auto_resume_from_checkpoints: false - -## Multimodal section -# int | tuple[int, int] | None . Size to resize images to, width x height. -# Will read from model/processor config if not set. -image_size: -# str. Algorithm to use for image resizing. "bilinear", "bicubic", "lanczos". Default is "bilinear". -image_resize_algorithm: 'bilinear' -## End of multimodal section - -# Don't mess with this, it's here for accelerate and torchrun -local_rank: - -# Add or change special tokens. -# If you add tokens here, you don't need to add them to the `tokens` list. -special_tokens: - # bos_token: "<s>" - # eos_token: "</s>" - # unk_token: "<unk>" - # pad_token: "[PAD]" - -# Optional[list[str]]. Add extra tokens to the tokenizer. -tokens: - # - "<|startoftext|>" - # - "<|endoftext|>" - -# Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer. -# Only works for tokens that are not part of the base vocab (aka are added_tokens). -# Can be checked if they exist in tokenizer.json added_tokens. -added_tokens_overrides: # Dict[int, str] -# 128041: "<|im_start|>" -# 128042: "<|im_end|>" - -# FSDP -fsdp: -fsdp_config: - -# Deepspeed config path. e.g., deepspeed_configs/zero3.json -deepspeed: - -# Advanced DDP Arguments -ddp_timeout: -ddp_bucket_cap_mb: -ddp_broadcast_buffers: - -# Sequence parallelism -# Set to a divisor of the number of GPUs available to split sequences into chunks of equal size. -# Use in long context training to prevent OOM when sequences cannot fit into a single GPU's VRAM. -# E.g., if 4 GPUs are available, set this value to 2 to split each sequence into two equal-sized -# subsequences, or set to 4 to split into four equal-sized subsequences. -# See https://docs.axolotl.ai/docs/sequence_parallelism.html for more details. 
-sequence_parallel_degree: -# Optional; strides across the key dimension. Larger values use more memory but should make training faster. -# Must evenly divide the number of KV heads in your model. -heads_k_stride: 1 -# One of "varlen_llama3", "batch_ring", "batch_zigzag", "batch_stripe". Defaults to "varlen_llama3" -# in the sample packing case, and "batch_ring" in the non-sample packing case. -ring_attn_func: - -# Path to torch distx for optim 'adamw_anyprecision' -torchdistx_path: - -# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize -pretraining_dataset: - -# Debug mode -debug: - -# Seed -seed: - -# Allow overwrite yml config using from cli -strict: +torch_compile_mode: # 'default' | 'reduce-overhead' | 'max-autotune' + +# Training hyperparameters + +# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps. +gradient_accumulation_steps: 1 +# The number of samples to include in each batch. This is the number of samples sent to each GPU. +# Batch size per gpu = micro_batch_size * gradient_accumulation_steps +micro_batch_size: 2 +eval_batch_size: +num_epochs: 4 +warmup_steps: 100 # cannot use with warmup_ratio +warmup_ratio: 0.05 # cannot use with warmup_steps +learning_rate: 0.00003 +lr_quadratic_warmup: +logging_steps: +eval_steps: # Leave empty to eval at each epoch, integer for every N steps. float for fraction of total steps +evals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps +eval_strategy: # Set to `"no"` to skip evaluation, `"epoch"` at end of each epoch, leave empty to infer from `eval_steps`. +save_strategy: # Set to `"no"` to skip checkpoint saves, `"epoch"` at end of each epoch, `"best"` when better result is achieved, leave empty to infer from `save_steps`. +save_steps: # Leave empty to save at each epoch, integer for every N steps. 
float for fraction of total steps +saves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps +save_total_limit: # Checkpoints saved at a time +save_only_model: # Save only the model weights, skipping the optimizer. Using this means you can't resume from checkpoints. +# Maximum number of iterations to train for. It precedes num_epochs which means that +# if both are set, num_epochs will not be guaranteed. +# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps +max_steps: + +# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time. +include_tokens_per_second: # Optional[bool] + +# whether to find batch size that fits in memory. Passed to underlying transformers Trainer +auto_find_batch_size: # Optional[bool] + +eval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0 +eval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128 +do_causal_lm_eval: # Whether to run causal language model evaluation for metrics in `eval_causal_lm_metrics`. +eval_causal_lm_metrics: # HF evaluate metrics used during evaluation. Default is ["sacrebleu", "comet", "ter", "chrf", "perplexity"] + +profiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir. + # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information + # snapshots can be visualized @ https://pytorch.org/memory_viz + +loss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training) +loss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3) + +# Save model as safetensors (require safetensors package). 
Default True +save_safetensors: + +# Whether to mask out or include the human's prompt from the training labels +train_on_inputs: false +# Group similarly sized data to minimize padding. +# May be slower to start, as it must download and sort the entire dataset. +# Note that training loss may have an oscillating pattern with this enabled. +group_by_length: false + +# Whether to use gradient checkpointing. Available options are: true, false, "offload", "offload_disk". +# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing +gradient_checkpointing: false +# additional kwargs to pass to the trainer for gradient checkpointing +# gradient_checkpointing_kwargs: +# use_reentrant: true + +# Stop training after this many evaluation losses have increased in a row +# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback +early_stopping_patience: 3 + +# Specify a scheduler and kwargs to use with the optimizer +# Valid values are driven by the Transformers SchedulerType class, see: +# https://github.com/huggingface/transformers/blob/5f4ecf2d9f867a1255131d2461d75793c0cf1db2/src/transformers/trainer_utils.py#L420 +# Valid values include +# - 'linear' +# - 'cosine' (default) +# - 'cosine_with_restarts' +# - 'polynomial' +# - 'constant' +# - 'constant_with_warmup' +# - 'inverse_sqrt' +# - 'reduce_lr_on_plateau' +# - 'cosine_with_min_lr' +# - 'warmup_stable_decay' + +# Additional schedulers include: +# - 'one_cycle' +# - 'rex' +lr_scheduler: +lr_scheduler_kwargs: +cosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr +cosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. 
cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf) + +# For one_cycle optim +lr_div_factor: # Learning rate div factor + +# Specify optimizer +# Valid values are driven by the Transformers OptimizerNames class, see: +# https://github.com/huggingface/transformers/blob/cbf924b76c03828101a34069a96d209314114fd5/src/transformers/training_args.py#L144-L189 +# +# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of +# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. When in doubt, it is recommended to start with the optimizer used +# in the examples/ for your model and fine-tuning use case. +# +# Valid values for 'optimizer' include: +# - adamw_torch +# - adamw_torch_fused (default) +# - adamw_torch_xla +# - adamw_torch_npu_fused +# - adamw_apex_fused +# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1) +# - adafactor +# - adamw_anyprecision +# - adamw_torch_4bit +# - ademamix +# - sgd +# - adagrad +# - adamw_bnb_8bit +# - adamw_8bit # alias for adamw_bnb_8bit +# - ademamix_8bit +# - lion_8bit +# - lion_32bit +# - paged_adamw_32bit +# - paged_adamw_8bit +# - paged_ademamix_32bit +# - paged_ademamix_8bit +# - paged_lion_32bit +# - paged_lion_8bit +# - rmsprop +# - rmsprop_bnb +# - rmsprop_bnb_8bit +# - rmsprop_bnb_32bit +# - galore_adamw +# - galore_adamw_8bit +# - galore_adafactor +# - galore_adamw_layerwise +# - galore_adamw_8bit_layerwise +# - galore_adafactor_layerwise +# - lomo +# - adalomo +# - grokadamw +# - schedule_free_adamw +# - schedule_free_sgd +# - apollo_adamw +# - apollo_adamw_layerwise +# +# Additional custom optimizers include: +# - optimi_adamw +# - ao_adamw_8bit +# - ao_adamw_fp8 +# - came_pytorch +optimizer: +# Dictionary of arguments to pass to the optimizer +optim_args: +# For Galore Optimizers the following optim_args are available +# rank: # type: int +# update_proj_gap # type: int +# scale # 
type: float +# proj_type: # type: str, default = std + +# The target modules to optimize, i.e. the module names that you would like to train, right now this is used only for GaLore algorithm +optim_target_modules: +# - self_attn # for llama +# - mlp + +# Specify weight decay +weight_decay: +# adamw hyperparams +adam_beta1: +adam_beta2: +adam_beta3: # only used for CAME Optimizer +adam_epsilon: +adam_epsilon2: # only used for CAME Optimizer +# Gradient clipping max norm +max_grad_norm: + +# Augmentation techniques +# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings +# currently only supported on Llama and Mistral +neftune_noise_alpha: + +# Optional[bool]. Whether to bettertransformers +flash_optimum: + +# Note: Only one of the following attention patches can be used at a time. +# For example, if you set `xformers_attention` to `true`, do not set `flash_attention` to `true`. + +# Optional[bool]. Whether to use xformers attention patch https://github.com/facebookresearch/xformers: +xformers_attention: +# Optional[bool]. Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention: +flash_attention: +flash_attn_cross_entropy: # Optional[bool]. Whether to use flash-attention cross entropy implementation - advanced use only +flash_attn_rms_norm: # Optional[bool]. Whether to use flash-attention rms norm implementation - advanced use only +flash_attn_fuse_qkv: # Optional[bool]. Whether to fuse QKV into a single operation +flash_attn_fuse_mlp: # Optional[bool]. Whether to fuse part of the MLP into a single operation +# Optional[bool]. Whether to use scaled-dot-product attention +# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html +sdp_attention: +# Optional[bool]. Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf +s2_attention: + +# Optional[bool]. Whether to use low_cpu_mem_usage +low_cpu_mem_usage: +# Optional[str]. 
Resume from a specific checkpoint dir +resume_from_checkpoint: +# Optional[bool]. If resume_from_checkpoint isn't set and you simply want it to start where it left off. +# Be careful with this being turned on between different models. +auto_resume_from_checkpoints: false + +## Multimodal section +# int | tuple[int, int] | None . Size to resize images to, width x height. +# Will read from model/processor config if not set. +image_size: +# str. Algorithm to use for image resizing. "bilinear", "bicubic", "lanczos". Default is "bilinear". +image_resize_algorithm: 'bilinear' +## End of multimodal section + +# Don't mess with this, it's here for accelerate and torchrun +local_rank: + +# Add or change special tokens. +# If you add tokens here, you don't need to add them to the `tokens` list. +special_tokens: + # bos_token: "<s>" + # eos_token: "</s>" + # unk_token: "<unk>" + # pad_token: "[PAD]" + +# Optional[list[str]]. Add extra tokens to the tokenizer. +tokens: + # - "<|startoftext|>" + # - "<|endoftext|>" + +# Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer. +# Only works for tokens that are not part of the base vocab (aka are added_tokens). +# Can be checked if they exist in tokenizer.json added_tokens. +added_tokens_overrides: # Dict[int, str] +# 128041: "<|im_start|>" +# 128042: "<|im_end|>" + +# FSDP +fsdp: +fsdp_config: + +# Deepspeed config path. e.g., deepspeed_configs/zero3.json +deepspeed: + +# Advanced DDP Arguments +ddp_timeout: +ddp_bucket_cap_mb: +ddp_broadcast_buffers: + +# Sequence parallelism +# Set to a divisor of the number of GPUs available to split sequences into chunks of equal size. +# Use in long context training to prevent OOM when sequences cannot fit into a single GPU's VRAM. +# E.g., if 4 GPUs are available, set this value to 2 to split each sequence into two equal-sized +# subsequences, or set to 4 to split into four equal-sized subsequences. 
+# See https://docs.axolotl.ai/docs/sequence_parallelism.html for more details. +sequence_parallel_degree: +# Optional; strides across the key dimension. Larger values use more memory but should make training faster. +# Must evenly divide the number of KV heads in your model. +heads_k_stride: 1 +# One of "varlen_llama3", "batch_ring", "batch_zigzag", "batch_stripe". Defaults to "varlen_llama3" +# in the sample packing case, and "batch_ring" in the non-sample packing case. +ring_attn_func: + +# Path to torch distx for optim 'adamw_anyprecision' +torchdistx_path: + +# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize +pretraining_dataset: + +# Debug mode +debug: + +# Seed +seed: + +# Allow overwrite yml config using from cli +strict: diff --git a/search.json b/search.json index 948ea715b..4f022d038 100644 --- a/search.json +++ b/search.json @@ -912,28 +912,28 @@ "href": "docs/api/utils.collators.mm_chat.html", "title": "utils.collators.mm_chat", "section": "", - "text": "utils.collators.mm_chat\nCollators for multi-modal chat messages and packing\n\n\n\n\n\nName\nDescription\n\n\n\n\nMultiModalChatDataCollator\nCollator for multi-modal chat messages\n\n\n\n\n\nutils.collators.mm_chat.MultiModalChatDataCollator(\n self,\n tokenizer,\n processing_strategy,\n packing=False,\n return_tensors='pt',\n padding=True,\n pad_to_multiple_of=None,\n)\nCollator for multi-modal chat messages" + "text": "utils.collators.mm_chat\nCollators for multi-modal chat messages and packing\n\n\n\n\n\nName\nDescription\n\n\n\n\nMultiModalChatDataCollator\nCollator for multi-modal chat messages\n\n\n\n\n\nutils.collators.mm_chat.MultiModalChatDataCollator(\n tokenizer,\n processing_strategy,\n packing=False,\n return_tensors='pt',\n padding=True,\n pad_to_multiple_of=None,\n)\nCollator for multi-modal chat messages" }, { "objectID": "docs/api/utils.collators.mm_chat.html#classes", "href": "docs/api/utils.collators.mm_chat.html#classes", "title": 
"utils.collators.mm_chat", "section": "", - "text": "Name\nDescription\n\n\n\n\nMultiModalChatDataCollator\nCollator for multi-modal chat messages\n\n\n\n\n\nutils.collators.mm_chat.MultiModalChatDataCollator(\n self,\n tokenizer,\n processing_strategy,\n packing=False,\n return_tensors='pt',\n padding=True,\n pad_to_multiple_of=None,\n)\nCollator for multi-modal chat messages" + "text": "Name\nDescription\n\n\n\n\nMultiModalChatDataCollator\nCollator for multi-modal chat messages\n\n\n\n\n\nutils.collators.mm_chat.MultiModalChatDataCollator(\n tokenizer,\n processing_strategy,\n packing=False,\n return_tensors='pt',\n padding=True,\n pad_to_multiple_of=None,\n)\nCollator for multi-modal chat messages" }, { "objectID": "docs/api/prompt_strategies.pygmalion.html", "href": "docs/api/prompt_strategies.pygmalion.html", "title": "prompt_strategies.pygmalion", "section": "", - "text": "prompt_strategies.pygmalion\nModule containing the PygmalionPromptTokenizingStrategy and PygmalionPrompter class\n\n\n\n\n\nName\nDescription\n\n\n\n\nPygmalionPromptTokenizingStrategy\nTokenizing strategy for Pygmalion.\n\n\nPygmalionPrompter\nPrompter for Pygmalion.\n\n\n\n\n\nprompt_strategies.pygmalion.PygmalionPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n *args,\n **kwargs,\n)\nTokenizing strategy for Pygmalion.\n\n\n\nprompt_strategies.pygmalion.PygmalionPrompter(self, *args, **kwargs)\nPrompter for Pygmalion." + "text": "prompt_strategies.pygmalion\nModule containing the PygmalionPromptTokenizingStrategy and PygmalionPrompter class\n\n\n\n\n\nName\nDescription\n\n\n\n\nPygmalionPromptTokenizingStrategy\nTokenizing strategy for Pygmalion.\n\n\nPygmalionPrompter\nPrompter for Pygmalion.\n\n\n\n\n\nprompt_strategies.pygmalion.PygmalionPromptTokenizingStrategy(\n prompter,\n tokenizer,\n *args,\n **kwargs,\n)\nTokenizing strategy for Pygmalion.\n\n\n\nprompt_strategies.pygmalion.PygmalionPrompter(*args, **kwargs)\nPrompter for Pygmalion." 
}, { "objectID": "docs/api/prompt_strategies.pygmalion.html#classes", "href": "docs/api/prompt_strategies.pygmalion.html#classes", "title": "prompt_strategies.pygmalion", "section": "", - "text": "Name\nDescription\n\n\n\n\nPygmalionPromptTokenizingStrategy\nTokenizing strategy for Pygmalion.\n\n\nPygmalionPrompter\nPrompter for Pygmalion.\n\n\n\n\n\nprompt_strategies.pygmalion.PygmalionPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n *args,\n **kwargs,\n)\nTokenizing strategy for Pygmalion.\n\n\n\nprompt_strategies.pygmalion.PygmalionPrompter(self, *args, **kwargs)\nPrompter for Pygmalion." + "text": "Name\nDescription\n\n\n\n\nPygmalionPromptTokenizingStrategy\nTokenizing strategy for Pygmalion.\n\n\nPygmalionPrompter\nPrompter for Pygmalion.\n\n\n\n\n\nprompt_strategies.pygmalion.PygmalionPromptTokenizingStrategy(\n prompter,\n tokenizer,\n *args,\n **kwargs,\n)\nTokenizing strategy for Pygmalion.\n\n\n\nprompt_strategies.pygmalion.PygmalionPrompter(*args, **kwargs)\nPrompter for Pygmalion." 
}, { "objectID": "docs/api/cli.cloud.base.html", @@ -1038,56 +1038,56 @@ "href": "docs/api/core.trainers.trl.html", "title": "core.trainers.trl", "section": "", - "text": "core.trainers.trl\nModule for TRL PPO trainer\n\n\n\n\n\nName\nDescription\n\n\n\n\nAxolotlCPOTrainer\nExtend the base CPOTrainer for axolotl helpers\n\n\nAxolotlKTOTrainer\nExtend the base KTOTrainer for axolotl helpers\n\n\nAxolotlORPOTrainer\nExtend the base ORPOTrainer for axolotl helpers\n\n\nAxolotlPRMTrainer\nExtend the base trl.PRMTrainer for axolotl helpers\n\n\nAxolotlRewardTrainer\nExtend the base RewardTrainer for axolotl helpers\n\n\nTRLPPOTrainer\nWrapper for TRL PPO trainer to handle customizations\n\n\n\n\n\ncore.trainers.trl.AxolotlCPOTrainer()\nExtend the base CPOTrainer for axolotl helpers\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_batch_loss_metrics\nCompute the CPO loss and other metrics for the given batch of inputs for train or test.\n\n\n\n\n\ncore.trainers.trl.AxolotlCPOTrainer.get_batch_loss_metrics(\n model,\n batch,\n train_eval='train',\n)\nCompute the CPO loss and other metrics for the given batch of inputs for train or test.\n\n\n\n\n\ncore.trainers.trl.AxolotlKTOTrainer()\nExtend the base KTOTrainer for axolotl helpers\n\n\n\ncore.trainers.trl.AxolotlORPOTrainer()\nExtend the base ORPOTrainer for axolotl helpers\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_batch_loss_metrics\nCompute the ORPO loss and other metrics for the given batch of inputs for train or test.\n\n\n\n\n\ncore.trainers.trl.AxolotlORPOTrainer.get_batch_loss_metrics(\n model,\n batch,\n train_eval='train',\n)\nCompute the ORPO loss and other metrics for the given batch of inputs for train or test.\n\n\n\n\n\ncore.trainers.trl.AxolotlPRMTrainer()\nExtend the base trl.PRMTrainer for axolotl helpers\n\n\n\ncore.trainers.trl.AxolotlRewardTrainer()\nExtend the base RewardTrainer for axolotl helpers\n\n\n\ncore.trainers.trl.TRLPPOTrainer()\nWrapper for TRL PPO trainer to handle customizations" + "text": 
"core.trainers.trl\nModule for TRL PPO trainer\n\n\n\n\n\nName\nDescription\n\n\n\n\nAxolotlCPOTrainer\nExtend the base CPOTrainer for axolotl helpers\n\n\nAxolotlKTOTrainer\nExtend the base KTOTrainer for axolotl helpers\n\n\nAxolotlORPOTrainer\nExtend the base ORPOTrainer for axolotl helpers\n\n\nAxolotlPRMTrainer\nExtend the base trl.PRMTrainer for axolotl helpers\n\n\nAxolotlRewardTrainer\nExtend the base RewardTrainer for axolotl helpers\n\n\nTRLPPOTrainer\nWrapper for TRL PPO trainer to handle customizations\n\n\n\n\n\ncore.trainers.trl.AxolotlCPOTrainer(*args, **kwargs)\nExtend the base CPOTrainer for axolotl helpers\n\n\n\ncore.trainers.trl.AxolotlKTOTrainer(*args, **kwargs)\nExtend the base KTOTrainer for axolotl helpers\n\n\n\ncore.trainers.trl.AxolotlORPOTrainer(*args, **kwargs)\nExtend the base ORPOTrainer for axolotl helpers\n\n\n\ncore.trainers.trl.AxolotlPRMTrainer(*args, **kwargs)\nExtend the base trl.PRMTrainer for axolotl helpers\n\n\n\ncore.trainers.trl.AxolotlRewardTrainer(*args, **kwargs)\nExtend the base RewardTrainer for axolotl helpers\n\n\n\ncore.trainers.trl.TRLPPOTrainer()\nWrapper for TRL PPO trainer to handle customizations" }, { "objectID": "docs/api/core.trainers.trl.html#classes", "href": "docs/api/core.trainers.trl.html#classes", "title": "core.trainers.trl", "section": "", - "text": "Name\nDescription\n\n\n\n\nAxolotlCPOTrainer\nExtend the base CPOTrainer for axolotl helpers\n\n\nAxolotlKTOTrainer\nExtend the base KTOTrainer for axolotl helpers\n\n\nAxolotlORPOTrainer\nExtend the base ORPOTrainer for axolotl helpers\n\n\nAxolotlPRMTrainer\nExtend the base trl.PRMTrainer for axolotl helpers\n\n\nAxolotlRewardTrainer\nExtend the base RewardTrainer for axolotl helpers\n\n\nTRLPPOTrainer\nWrapper for TRL PPO trainer to handle customizations\n\n\n\n\n\ncore.trainers.trl.AxolotlCPOTrainer()\nExtend the base CPOTrainer for axolotl helpers\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_batch_loss_metrics\nCompute the CPO loss and other metrics 
for the given batch of inputs for train or test.\n\n\n\n\n\ncore.trainers.trl.AxolotlCPOTrainer.get_batch_loss_metrics(\n model,\n batch,\n train_eval='train',\n)\nCompute the CPO loss and other metrics for the given batch of inputs for train or test.\n\n\n\n\n\ncore.trainers.trl.AxolotlKTOTrainer()\nExtend the base KTOTrainer for axolotl helpers\n\n\n\ncore.trainers.trl.AxolotlORPOTrainer()\nExtend the base ORPOTrainer for axolotl helpers\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_batch_loss_metrics\nCompute the ORPO loss and other metrics for the given batch of inputs for train or test.\n\n\n\n\n\ncore.trainers.trl.AxolotlORPOTrainer.get_batch_loss_metrics(\n model,\n batch,\n train_eval='train',\n)\nCompute the ORPO loss and other metrics for the given batch of inputs for train or test.\n\n\n\n\n\ncore.trainers.trl.AxolotlPRMTrainer()\nExtend the base trl.PRMTrainer for axolotl helpers\n\n\n\ncore.trainers.trl.AxolotlRewardTrainer()\nExtend the base RewardTrainer for axolotl helpers\n\n\n\ncore.trainers.trl.TRLPPOTrainer()\nWrapper for TRL PPO trainer to handle customizations" + "text": "Name\nDescription\n\n\n\n\nAxolotlCPOTrainer\nExtend the base CPOTrainer for axolotl helpers\n\n\nAxolotlKTOTrainer\nExtend the base KTOTrainer for axolotl helpers\n\n\nAxolotlORPOTrainer\nExtend the base ORPOTrainer for axolotl helpers\n\n\nAxolotlPRMTrainer\nExtend the base trl.PRMTrainer for axolotl helpers\n\n\nAxolotlRewardTrainer\nExtend the base RewardTrainer for axolotl helpers\n\n\nTRLPPOTrainer\nWrapper for TRL PPO trainer to handle customizations\n\n\n\n\n\ncore.trainers.trl.AxolotlCPOTrainer(*args, **kwargs)\nExtend the base CPOTrainer for axolotl helpers\n\n\n\ncore.trainers.trl.AxolotlKTOTrainer(*args, **kwargs)\nExtend the base KTOTrainer for axolotl helpers\n\n\n\ncore.trainers.trl.AxolotlORPOTrainer(*args, **kwargs)\nExtend the base ORPOTrainer for axolotl helpers\n\n\n\ncore.trainers.trl.AxolotlPRMTrainer(*args, **kwargs)\nExtend the base trl.PRMTrainer for 
axolotl helpers\n\n\n\ncore.trainers.trl.AxolotlRewardTrainer(*args, **kwargs)\nExtend the base RewardTrainer for axolotl helpers\n\n\n\ncore.trainers.trl.TRLPPOTrainer()\nWrapper for TRL PPO trainer to handle customizations" }, { "objectID": "docs/api/utils.collators.batching.html", "href": "docs/api/utils.collators.batching.html", "title": "utils.collators.batching", "section": "", - "text": "utils.collators.batching\nData collators for axolotl to pad labels and position_ids for packed sequences\n\n\n\n\n\nName\nDescription\n\n\n\n\nBatchSamplerDataCollatorForSeq2Seq\nCollator for multipack specific to the using the BatchSampler\n\n\nDataCollatorForSeq2Seq\nData collator that will dynamically pad the inputs received, as well as the labels and position_ids\n\n\nPretrainingBatchSamplerDataCollatorForSeq2Seq\nCollator for multipack specific to the using the BatchSampler\n\n\nV2BatchSamplerDataCollatorForSeq2Seq\nCollator for multipack specific to the using the BatchSampler\n\n\n\n\n\nutils.collators.batching.BatchSamplerDataCollatorForSeq2Seq(\n self,\n tokenizer,\n model=None,\n padding=True,\n max_length=None,\n pad_to_multiple_of=None,\n label_pad_token_id=-100,\n position_pad_token_id=0,\n return_tensors='pt',\n)\nCollator for multipack specific to the using the BatchSampler\n\n\n\nutils.collators.batching.DataCollatorForSeq2Seq(\n self,\n tokenizer,\n model=None,\n padding=True,\n max_length=None,\n pad_to_multiple_of=None,\n label_pad_token_id=-100,\n position_pad_token_id=0,\n return_tensors='pt',\n)\nData collator that will dynamically pad the inputs received, as well as the labels and position_ids\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntokenizer\n[PreTrainedTokenizer] or [PreTrainedTokenizerFast]\nThe tokenizer used for encoding the data.\nrequired\n\n\nmodel\n[PreTrainedModel]\nThe model that is being trained. 
If set and has the prepare_decoder_input_ids_from_labels, use it to prepare the decoder_input_ids This is useful when using label_smoothing to avoid calculating loss twice.\nNone\n\n\npadding\nbool, str or [~utils.PaddingStrategy], optional, defaults to True\nSelect a strategy to pad the returned sequences (according to the model’s padding side and padding index) among: - True or 'longest' (default): Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). - 'max_length': Pad to a maximum length specified with the argument max_length or to the maximum acceptable input length for the model if that argument is not provided. - False or 'do_not_pad': No padding (i.e., can output a batch with sequences of different lengths).\nTrue\n\n\nmax_length\nint, optional\nMaximum length of the returned list and optionally padding length (see above).\nNone\n\n\npad_to_multiple_of\nint, optional\nIf set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).\nNone\n\n\nlabel_pad_token_id\nint, optional, defaults to -100\nThe id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).\n-100\n\n\nreturn_tensors\nstr\nThe type of Tensor to return. 
Allowable values are “np”, “pt” and “tf”.\n'pt'\n\n\n\n\n\n\n\nutils.collators.batching.PretrainingBatchSamplerDataCollatorForSeq2Seq(\n self,\n *args,\n multipack_attn=True,\n **kwargs,\n)\nCollator for multipack specific to the using the BatchSampler\n\n\n\nutils.collators.batching.V2BatchSamplerDataCollatorForSeq2Seq(\n self,\n tokenizer,\n model=None,\n padding=True,\n max_length=None,\n pad_to_multiple_of=None,\n label_pad_token_id=-100,\n position_pad_token_id=0,\n return_tensors='pt',\n)\nCollator for multipack specific to the using the BatchSampler" + "text": "utils.collators.batching\nData collators for axolotl to pad labels and position_ids for packed sequences\n\n\n\n\n\nName\nDescription\n\n\n\n\nBatchSamplerDataCollatorForSeq2Seq\nCollator for multipack specific to the using the BatchSampler\n\n\nDataCollatorForSeq2Seq\nData collator that will dynamically pad the inputs received, as well as the labels and position_ids\n\n\nPretrainingBatchSamplerDataCollatorForSeq2Seq\nCollator for multipack specific to the using the BatchSampler\n\n\nV2BatchSamplerDataCollatorForSeq2Seq\nCollator for multipack specific to the using the BatchSampler\n\n\n\n\n\nutils.collators.batching.BatchSamplerDataCollatorForSeq2Seq(\n tokenizer,\n model=None,\n padding=True,\n max_length=None,\n pad_to_multiple_of=None,\n label_pad_token_id=-100,\n position_pad_token_id=0,\n return_tensors='pt',\n)\nCollator for multipack specific to the using the BatchSampler\n\n\n\nutils.collators.batching.DataCollatorForSeq2Seq(\n tokenizer,\n model=None,\n padding=True,\n max_length=None,\n pad_to_multiple_of=None,\n label_pad_token_id=-100,\n position_pad_token_id=0,\n return_tensors='pt',\n)\nData collator that will dynamically pad the inputs received, as well as the labels and position_ids\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntokenizer\n[PreTrainedTokenizer] or [PreTrainedTokenizerFast]\nThe tokenizer used for encoding the 
data.\nrequired\n\n\nmodel\n[PreTrainedModel]\nThe model that is being trained. If set and has the prepare_decoder_input_ids_from_labels, use it to prepare the decoder_input_ids This is useful when using label_smoothing to avoid calculating loss twice.\nNone\n\n\npadding\nbool, str or [~utils.PaddingStrategy], optional, defaults to True\nSelect a strategy to pad the returned sequences (according to the model’s padding side and padding index) among: - True or 'longest' (default): Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). - 'max_length': Pad to a maximum length specified with the argument max_length or to the maximum acceptable input length for the model if that argument is not provided. - False or 'do_not_pad': No padding (i.e., can output a batch with sequences of different lengths).\nTrue\n\n\nmax_length\nint, optional\nMaximum length of the returned list and optionally padding length (see above).\nNone\n\n\npad_to_multiple_of\nint, optional\nIf set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).\nNone\n\n\nlabel_pad_token_id\nint, optional, defaults to -100\nThe id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).\n-100\n\n\nreturn_tensors\nstr\nThe type of Tensor to return. 
Allowable values are “np”, “pt” and “tf”.\n'pt'\n\n\n\n\n\n\n\nutils.collators.batching.PretrainingBatchSamplerDataCollatorForSeq2Seq(\n *args,\n multipack_attn=True,\n **kwargs,\n)\nCollator for multipack specific to the using the BatchSampler\n\n\n\nutils.collators.batching.V2BatchSamplerDataCollatorForSeq2Seq(\n tokenizer,\n model=None,\n padding=True,\n max_length=None,\n pad_to_multiple_of=None,\n label_pad_token_id=-100,\n position_pad_token_id=0,\n return_tensors='pt',\n)\nCollator for multipack specific to the using the BatchSampler" }, { "objectID": "docs/api/utils.collators.batching.html#classes", "href": "docs/api/utils.collators.batching.html#classes", "title": "utils.collators.batching", "section": "", - "text": "Name\nDescription\n\n\n\n\nBatchSamplerDataCollatorForSeq2Seq\nCollator for multipack specific to the using the BatchSampler\n\n\nDataCollatorForSeq2Seq\nData collator that will dynamically pad the inputs received, as well as the labels and position_ids\n\n\nPretrainingBatchSamplerDataCollatorForSeq2Seq\nCollator for multipack specific to the using the BatchSampler\n\n\nV2BatchSamplerDataCollatorForSeq2Seq\nCollator for multipack specific to the using the BatchSampler\n\n\n\n\n\nutils.collators.batching.BatchSamplerDataCollatorForSeq2Seq(\n self,\n tokenizer,\n model=None,\n padding=True,\n max_length=None,\n pad_to_multiple_of=None,\n label_pad_token_id=-100,\n position_pad_token_id=0,\n return_tensors='pt',\n)\nCollator for multipack specific to the using the BatchSampler\n\n\n\nutils.collators.batching.DataCollatorForSeq2Seq(\n self,\n tokenizer,\n model=None,\n padding=True,\n max_length=None,\n pad_to_multiple_of=None,\n label_pad_token_id=-100,\n position_pad_token_id=0,\n return_tensors='pt',\n)\nData collator that will dynamically pad the inputs received, as well as the labels and position_ids\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntokenizer\n[PreTrainedTokenizer] or [PreTrainedTokenizerFast]\nThe tokenizer 
used for encoding the data.\nrequired\n\n\nmodel\n[PreTrainedModel]\nThe model that is being trained. If set and has the prepare_decoder_input_ids_from_labels, use it to prepare the decoder_input_ids This is useful when using label_smoothing to avoid calculating loss twice.\nNone\n\n\npadding\nbool, str or [~utils.PaddingStrategy], optional, defaults to True\nSelect a strategy to pad the returned sequences (according to the model’s padding side and padding index) among: - True or 'longest' (default): Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). - 'max_length': Pad to a maximum length specified with the argument max_length or to the maximum acceptable input length for the model if that argument is not provided. - False or 'do_not_pad': No padding (i.e., can output a batch with sequences of different lengths).\nTrue\n\n\nmax_length\nint, optional\nMaximum length of the returned list and optionally padding length (see above).\nNone\n\n\npad_to_multiple_of\nint, optional\nIf set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).\nNone\n\n\nlabel_pad_token_id\nint, optional, defaults to -100\nThe id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).\n-100\n\n\nreturn_tensors\nstr\nThe type of Tensor to return. 
Allowable values are “np”, “pt” and “tf”.\n'pt'\n\n\n\n\n\n\n\nutils.collators.batching.PretrainingBatchSamplerDataCollatorForSeq2Seq(\n self,\n *args,\n multipack_attn=True,\n **kwargs,\n)\nCollator for multipack specific to the using the BatchSampler\n\n\n\nutils.collators.batching.V2BatchSamplerDataCollatorForSeq2Seq(\n self,\n tokenizer,\n model=None,\n padding=True,\n max_length=None,\n pad_to_multiple_of=None,\n label_pad_token_id=-100,\n position_pad_token_id=0,\n return_tensors='pt',\n)\nCollator for multipack specific to the using the BatchSampler" + "text": "Name\nDescription\n\n\n\n\nBatchSamplerDataCollatorForSeq2Seq\nCollator for multipack specific to the using the BatchSampler\n\n\nDataCollatorForSeq2Seq\nData collator that will dynamically pad the inputs received, as well as the labels and position_ids\n\n\nPretrainingBatchSamplerDataCollatorForSeq2Seq\nCollator for multipack specific to the using the BatchSampler\n\n\nV2BatchSamplerDataCollatorForSeq2Seq\nCollator for multipack specific to the using the BatchSampler\n\n\n\n\n\nutils.collators.batching.BatchSamplerDataCollatorForSeq2Seq(\n tokenizer,\n model=None,\n padding=True,\n max_length=None,\n pad_to_multiple_of=None,\n label_pad_token_id=-100,\n position_pad_token_id=0,\n return_tensors='pt',\n)\nCollator for multipack specific to the using the BatchSampler\n\n\n\nutils.collators.batching.DataCollatorForSeq2Seq(\n tokenizer,\n model=None,\n padding=True,\n max_length=None,\n pad_to_multiple_of=None,\n label_pad_token_id=-100,\n position_pad_token_id=0,\n return_tensors='pt',\n)\nData collator that will dynamically pad the inputs received, as well as the labels and position_ids\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntokenizer\n[PreTrainedTokenizer] or [PreTrainedTokenizerFast]\nThe tokenizer used for encoding the data.\nrequired\n\n\nmodel\n[PreTrainedModel]\nThe model that is being trained. 
If set and has the prepare_decoder_input_ids_from_labels, use it to prepare the decoder_input_ids This is useful when using label_smoothing to avoid calculating loss twice.\nNone\n\n\npadding\nbool, str or [~utils.PaddingStrategy], optional, defaults to True\nSelect a strategy to pad the returned sequences (according to the model’s padding side and padding index) among: - True or 'longest' (default): Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). - 'max_length': Pad to a maximum length specified with the argument max_length or to the maximum acceptable input length for the model if that argument is not provided. - False or 'do_not_pad': No padding (i.e., can output a batch with sequences of different lengths).\nTrue\n\n\nmax_length\nint, optional\nMaximum length of the returned list and optionally padding length (see above).\nNone\n\n\npad_to_multiple_of\nint, optional\nIf set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).\nNone\n\n\nlabel_pad_token_id\nint, optional, defaults to -100\nThe id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).\n-100\n\n\nreturn_tensors\nstr\nThe type of Tensor to return. 
Allowable values are “np”, “pt” and “tf”.\n'pt'\n\n\n\n\n\n\n\nutils.collators.batching.PretrainingBatchSamplerDataCollatorForSeq2Seq(\n *args,\n multipack_attn=True,\n **kwargs,\n)\nCollator for multipack specific to the using the BatchSampler\n\n\n\nutils.collators.batching.V2BatchSamplerDataCollatorForSeq2Seq(\n tokenizer,\n model=None,\n padding=True,\n max_length=None,\n pad_to_multiple_of=None,\n label_pad_token_id=-100,\n position_pad_token_id=0,\n return_tensors='pt',\n)\nCollator for multipack specific to the using the BatchSampler" }, { "objectID": "docs/api/prompt_strategies.alpaca_w_system.html", "href": "docs/api/prompt_strategies.alpaca_w_system.html", "title": "prompt_strategies.alpaca_w_system", "section": "", - "text": "prompt_strategies.alpaca_w_system\nPrompt strategies loader for alpaca instruction datasets with system prompts\n\n\n\n\n\nName\nDescription\n\n\n\n\nInstructionWSystemPromptTokenizingStrategy\nTokenizing strategy for instruction-based prompts.\n\n\nOpenOrcaPromptTokenizingStrategy\nTokenizing strategy for OpenOrca datasets\n\n\nOpenOrcaSystemDataPrompter\nAlpaca Style Prompter that uses system prompts from the dataset, with OpenOrca prompts\n\n\nSystemDataPrompter\nAlpaca Style Prompter that uses system prompts from the dataset\n\n\n\n\n\nprompt_strategies.alpaca_w_system.InstructionWSystemPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for instruction-based prompts.\n\n\n\nprompt_strategies.alpaca_w_system.OpenOrcaPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for OpenOrca datasets\n\n\n\nprompt_strategies.alpaca_w_system.OpenOrcaSystemDataPrompter(\n self,\n prompt_style=PromptStyle.INSTRUCT.value,\n)\nAlpaca Style Prompter that uses system prompts from the dataset, with OpenOrca prompts\n\n\n\nprompt_strategies.alpaca_w_system.SystemDataPrompter(\n self,\n 
prompt_style=PromptStyle.INSTRUCT.value,\n)\nAlpaca Style Prompter that uses system prompts from the dataset" + "text": "prompt_strategies.alpaca_w_system\nPrompt strategies loader for alpaca instruction datasets with system prompts\n\n\n\n\n\nName\nDescription\n\n\n\n\nInstructionWSystemPromptTokenizingStrategy\nTokenizing strategy for instruction-based prompts.\n\n\nOpenOrcaPromptTokenizingStrategy\nTokenizing strategy for OpenOrca datasets\n\n\nOpenOrcaSystemDataPrompter\nAlpaca Style Prompter that uses system prompts from the dataset, with OpenOrca prompts\n\n\nSystemDataPrompter\nAlpaca Style Prompter that uses system prompts from the dataset\n\n\n\n\n\nprompt_strategies.alpaca_w_system.InstructionWSystemPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for instruction-based prompts.\n\n\n\nprompt_strategies.alpaca_w_system.OpenOrcaPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for OpenOrca datasets\n\n\n\nprompt_strategies.alpaca_w_system.OpenOrcaSystemDataPrompter(\n prompt_style=PromptStyle.INSTRUCT.value,\n)\nAlpaca Style Prompter that uses system prompts from the dataset, with OpenOrca prompts\n\n\n\nprompt_strategies.alpaca_w_system.SystemDataPrompter(\n prompt_style=PromptStyle.INSTRUCT.value,\n)\nAlpaca Style Prompter that uses system prompts from the dataset" }, { "objectID": "docs/api/prompt_strategies.alpaca_w_system.html#classes", "href": "docs/api/prompt_strategies.alpaca_w_system.html#classes", "title": "prompt_strategies.alpaca_w_system", "section": "", - "text": "Name\nDescription\n\n\n\n\nInstructionWSystemPromptTokenizingStrategy\nTokenizing strategy for instruction-based prompts.\n\n\nOpenOrcaPromptTokenizingStrategy\nTokenizing strategy for OpenOrca datasets\n\n\nOpenOrcaSystemDataPrompter\nAlpaca Style Prompter that uses system prompts from the dataset, with OpenOrca 
prompts\n\n\nSystemDataPrompter\nAlpaca Style Prompter that uses system prompts from the dataset\n\n\n\n\n\nprompt_strategies.alpaca_w_system.InstructionWSystemPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for instruction-based prompts.\n\n\n\nprompt_strategies.alpaca_w_system.OpenOrcaPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for OpenOrca datasets\n\n\n\nprompt_strategies.alpaca_w_system.OpenOrcaSystemDataPrompter(\n self,\n prompt_style=PromptStyle.INSTRUCT.value,\n)\nAlpaca Style Prompter that uses system prompts from the dataset, with OpenOrca prompts\n\n\n\nprompt_strategies.alpaca_w_system.SystemDataPrompter(\n self,\n prompt_style=PromptStyle.INSTRUCT.value,\n)\nAlpaca Style Prompter that uses system prompts from the dataset" + "text": "Name\nDescription\n\n\n\n\nInstructionWSystemPromptTokenizingStrategy\nTokenizing strategy for instruction-based prompts.\n\n\nOpenOrcaPromptTokenizingStrategy\nTokenizing strategy for OpenOrca datasets\n\n\nOpenOrcaSystemDataPrompter\nAlpaca Style Prompter that uses system prompts from the dataset, with OpenOrca prompts\n\n\nSystemDataPrompter\nAlpaca Style Prompter that uses system prompts from the dataset\n\n\n\n\n\nprompt_strategies.alpaca_w_system.InstructionWSystemPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for instruction-based prompts.\n\n\n\nprompt_strategies.alpaca_w_system.OpenOrcaPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for OpenOrca datasets\n\n\n\nprompt_strategies.alpaca_w_system.OpenOrcaSystemDataPrompter(\n prompt_style=PromptStyle.INSTRUCT.value,\n)\nAlpaca Style Prompter that uses system prompts from the dataset, with OpenOrca 
prompts\n\n\n\nprompt_strategies.alpaca_w_system.SystemDataPrompter(\n prompt_style=PromptStyle.INSTRUCT.value,\n)\nAlpaca Style Prompter that uses system prompts from the dataset" }, { "objectID": "docs/api/integrations.base.html", "href": "docs/api/integrations.base.html", "title": "integrations.base", "section": "", - "text": "integrations.base\nBase class for all plugins.\nA plugin is a reusable, modular, and self-contained piece of code that extends the functionality of Axolotl.\nPlugins can be used to integrate third-party models, modify the training process, or add new features.\nTo create a new plugin, you need to inherit from the BasePlugin class and implement the required methods.\n\n\n\n\n\nName\nDescription\n\n\n\n\nBaseOptimizerFactory\nBase class for factories to create custom optimizers\n\n\nBasePlugin\nBase class for all plugins. Defines the interface for plugin methods.\n\n\nPluginManager\nThe PluginManager class is responsible for loading and managing plugins. It\n\n\n\n\n\nintegrations.base.BaseOptimizerFactory()\nBase class for factories to create custom optimizers\n\n\n\nintegrations.base.BasePlugin(self)\nBase class for all plugins. Defines the interface for plugin methods.\nA plugin is a reusable, modular, and self-contained piece of code that extends\nthe functionality of Axolotl. 
Plugins can be used to integrate third-party models,\nmodify the training process, or add new features.\nTo create a new plugin, you need to inherit from the BasePlugin class and\nimplement the required methods.\n\n\nPlugin methods include:\n- register(cfg): Registers the plugin with the given configuration.\n- load_datasets(cfg): Loads and preprocesses the dataset for training.\n- pre_model_load(cfg): Performs actions before the model is loaded.\n- post_model_build(cfg, model): Performs actions after the model is loaded, but\nbefore LoRA adapters are applied.\n- pre_lora_load(cfg, model): Performs actions before LoRA weights are loaded.\n- post_lora_load(cfg, model): Performs actions after LoRA weights are loaded.\n- post_model_load(cfg, model): Performs actions after the model is loaded,\ninclusive of any adapters.\n- post_trainer_create(cfg, trainer): Performs actions after the trainer is\ncreated.\n- create_optimizer(cfg, trainer): Creates and returns an optimizer for training.\n- create_lr_scheduler(cfg, trainer, optimizer, num_training_steps): Creates and\nreturns a learning rate scheduler.\n- add_callbacks_pre_trainer(cfg, model): Adds callbacks to the trainer before\ntraining.\n- add_callbacks_post_trainer(cfg, trainer): Adds callbacks to the trainer after\ntraining.\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nadd_callbacks_post_trainer\nAdds callbacks to the trainer after creating the trainer. 
This is useful for\n\n\nadd_callbacks_pre_trainer\nSet up callbacks before creating the trainer.\n\n\ncreate_lr_scheduler\nCreates and returns a learning rate scheduler.\n\n\ncreate_optimizer\nCreates and returns an optimizer for training.\n\n\nget_input_args\nReturns a pydantic model for the plugin’s input arguments.\n\n\nget_trainer_cls\nReturns a custom class for the trainer.\n\n\nload_datasets\nLoads and preprocesses the dataset for training.\n\n\npost_lora_load\nPerforms actions after LoRA weights are loaded.\n\n\npost_model_build\nPerforms actions after the model is built/loaded, but before any adapters are applied.\n\n\npost_model_load\nPerforms actions after the model is loaded.\n\n\npost_train\nPerforms actions after training is complete.\n\n\npost_train_unload\nPerforms actions after training is complete and the model is unloaded.\n\n\npost_trainer_create\nPerforms actions after the trainer is created.\n\n\npre_lora_load\nPerforms actions before LoRA weights are loaded.\n\n\npre_model_load\nPerforms actions before the model is loaded.\n\n\nregister\nRegisters the plugin with the given configuration.\n\n\n\n\n\nintegrations.base.BasePlugin.add_callbacks_post_trainer(cfg, trainer)\nAdds callbacks to the trainer after creating the trainer. 
This is useful for\ncallbacks that require access to the model or trainer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[Callable]\nA list of callback functions to be added\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.add_callbacks_pre_trainer(cfg, model)\nSet up callbacks before creating the trainer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[Callable]\nA list of callback functions to be added to the TrainingArgs.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.create_lr_scheduler(\n cfg,\n trainer,\n optimizer,\n num_training_steps,\n)\nCreates and returns a learning rate scheduler.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\noptimizer\nOptimizer\nThe optimizer for training.\nrequired\n\n\nnum_training_steps\nint\nTotal number of training steps\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nLRScheduler | None\nThe created learning rate scheduler.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.create_optimizer(cfg, trainer)\nCreates and returns an optimizer for training.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nOptimizer | None\nThe created optimizer.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.get_input_args()\nReturns a pydantic model for the plugin’s input 
arguments.\n\n\n\nintegrations.base.BasePlugin.get_trainer_cls(cfg)\nReturns a custom class for the trainer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe global axolotl configuration.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTrainer | None\nThe first non-None trainer class returned by a plugin.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.load_datasets(cfg, preprocess=False)\nLoads and preprocesses the dataset for training.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\npreprocess\nbool\nWhether this is the preprocess step of the datasets.\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\ndataset_meta\nUnion['TrainDatasetMeta', None]\nThe metadata for the training dataset.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_lora_load(cfg, model)\nPerforms actions after LoRA weights are loaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_model_build(cfg, model)\nPerforms actions after the model is built/loaded, but before any adapters are applied.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_model_load(cfg, model)\nPerforms actions after the model is loaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_train(cfg, model)\nPerforms actions after training is complete.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe axolotl 
configuration.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_train_unload(cfg)\nPerforms actions after training is complete and the model is unloaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_trainer_create(cfg, trainer)\nPerforms actions after the trainer is created.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.pre_lora_load(cfg, model)\nPerforms actions before LoRA weights are loaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.pre_model_load(cfg)\nPerforms actions before the model is loaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.register(cfg)\nRegisters the plugin with the given configuration.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\n\n\n\n\n\n\n\nintegrations.base.PluginManager()\nThe PluginManager class is responsible for loading and managing plugins. 
It\nshould be a singleton so it can be accessed from anywhere in the codebase.\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\nplugins\nOrderedDict[str, BasePlugin]\nA list of loaded plugins.\n\n\n\n\n\n\nKey methods include:\n- get_instance(): Static method to get the singleton instance of PluginManager.\n- register(plugin_name: str): Registers a new plugin by its name.\n- pre_model_load(cfg): Calls the pre_model_load method of all registered plugins.\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nadd_callbacks_post_trainer\nCalls the add_callbacks_post_trainer method of all registered plugins.\n\n\nadd_callbacks_pre_trainer\nCalls the add_callbacks_pre_trainer method of all registered plugins.\n\n\ncreate_lr_scheduler\nCalls the create_lr_scheduler method of all registered plugins and returns\n\n\ncreate_optimizer\nCalls the create_optimizer method of all registered plugins and returns\n\n\nget_input_args\nReturns a list of Pydantic classes for all registered plugins’ input arguments.’\n\n\nget_instance\nReturns the singleton instance of PluginManager. 
If the instance doesn’t\n\n\nget_trainer_cls\nCalls the get_trainer_cls method of all registered plugins and returns the\n\n\nload_datasets\nCalls the load_datasets method of each registered plugin.\n\n\npost_lora_load\nCalls the post_lora_load method of all registered plugins.\n\n\npost_model_build\nCalls the post_model_build method of all registered plugins after the\n\n\npost_model_load\nCalls the post_model_load method of all registered plugins after the model\n\n\npost_train\nCalls the post_train method of all registered plugins.\n\n\npost_train_unload\nCalls the post_train_unload method of all registered plugins.\n\n\npost_trainer_create\nCalls the post_trainer_create method of all registered plugins.\n\n\npre_lora_load\nCalls the pre_lora_load method of all registered plugins.\n\n\npre_model_load\nCalls the pre_model_load method of all registered plugins.\n\n\nregister\nRegisters a new plugin by its name.\n\n\n\n\n\nintegrations.base.PluginManager.add_callbacks_post_trainer(cfg, trainer)\nCalls the add_callbacks_post_trainer method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[Callable]\nA list of callback functions to be added to the TrainingArgs.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.add_callbacks_pre_trainer(cfg, model)\nCalls the add_callbacks_pre_trainer method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[Callable]\nA list of callback functions to be added to the TrainingArgs.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.create_lr_scheduler(\n trainer,\n optimizer,\n 
num_training_steps,\n)\nCalls the create_lr_scheduler method of all registered plugins and returns\nthe first non-None scheduler.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\noptimizer\nOptimizer\nThe optimizer for training.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nLRScheduler | None\nThe created learning rate scheduler, or None if not found.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.create_optimizer(trainer)\nCalls the create_optimizer method of all registered plugins and returns\nthe first non-None optimizer.\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nOptimizer | None\nThe created optimizer, or None if none was found.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.get_input_args()\nReturns a list of Pydantic classes for all registered plugins’ input arguments.’\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[str]\nA list of Pydantic classes for all registered plugins’ input arguments.’\n\n\n\n\n\n\n\nintegrations.base.PluginManager.get_instance()\nReturns the singleton instance of PluginManager. 
If the instance doesn’t\nexist, it creates a new one.\n\n\n\nintegrations.base.PluginManager.get_trainer_cls(cfg)\nCalls the get_trainer_cls method of all registered plugins and returns the\nfirst non-None trainer class.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTrainer | None\nThe first non-None trainer class returned by a plugin.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.load_datasets(cfg, preprocess=False)\nCalls the load_datasets method of each registered plugin.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\npreprocess\nbool\nWhether this is preprocess step of the datasets.\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nUnion['TrainDatasetMeta', None]\nThe dataset metadata loaded from all registered plugins.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_lora_load(cfg, model)\nCalls the post_lora_load method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_model_build(cfg, model)\nCalls the post_model_build method of all registered plugins after the\nmodel has been built / loaded, but before any adapters have been applied.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_model_load(cfg, model)\nCalls the post_model_load method of all registered plugins after the model\nhas been loaded inclusive of any 
adapters.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_train(cfg, model)\nCalls the post_train method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_train_unload(cfg)\nCalls the post_train_unload method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_trainer_create(cfg, trainer)\nCalls the post_trainer_create method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.pre_lora_load(cfg, model)\nCalls the pre_lora_load method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.pre_model_load(cfg)\nCalls the pre_model_load method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.register(plugin_name)\nRegisters a new plugin by its name.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nplugin_name\nstr\nThe name of the plugin to be 
registered.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nImportError\nIf the plugin module cannot be imported.\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nload_plugin\nLoads a plugin based on the given plugin name.\n\n\n\n\n\nintegrations.base.load_plugin(plugin_name)\nLoads a plugin based on the given plugin name.\nThe plugin name should be in the format “module_name.class_name”. This function\nsplits the plugin name into module and class, imports the module, retrieves the\nclass from the module, and creates an instance of the class.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nplugin_name\nstr\nThe name of the plugin to be loaded. The name should be in the format “module_name.class_name”.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nBasePlugin\nAn instance of the loaded plugin.\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nImportError\nIf the plugin module cannot be imported." + "text": "integrations.base\nBase class for all plugins.\nA plugin is a reusable, modular, and self-contained piece of code that extends the functionality of Axolotl.\nPlugins can be used to integrate third-party models, modify the training process, or add new features.\nTo create a new plugin, you need to inherit from the BasePlugin class and implement the required methods.\n\n\n\n\n\nName\nDescription\n\n\n\n\nBaseOptimizerFactory\nBase class for factories to create custom optimizers\n\n\nBasePlugin\nBase class for all plugins. Defines the interface for plugin methods.\n\n\nPluginManager\nThe PluginManager class is responsible for loading and managing plugins. It\n\n\n\n\n\nintegrations.base.BaseOptimizerFactory()\nBase class for factories to create custom optimizers\n\n\n\nintegrations.base.BasePlugin()\nBase class for all plugins. Defines the interface for plugin methods.\nA plugin is a reusable, modular, and self-contained piece of code that extends\nthe functionality of Axolotl. 
Plugins can be used to integrate third-party models,\nmodify the training process, or add new features.\nTo create a new plugin, you need to inherit from the BasePlugin class and\nimplement the required methods.\n\n\nPlugin methods include:\n- register(cfg): Registers the plugin with the given configuration.\n- load_datasets(cfg): Loads and preprocesses the dataset for training.\n- pre_model_load(cfg): Performs actions before the model is loaded.\n- post_model_build(cfg, model): Performs actions after the model is loaded, but\nbefore LoRA adapters are applied.\n- pre_lora_load(cfg, model): Performs actions before LoRA weights are loaded.\n- post_lora_load(cfg, model): Performs actions after LoRA weights are loaded.\n- post_model_load(cfg, model): Performs actions after the model is loaded,\ninclusive of any adapters.\n- post_trainer_create(cfg, trainer): Performs actions after the trainer is\ncreated.\n- create_optimizer(cfg, trainer): Creates and returns an optimizer for training.\n- create_lr_scheduler(cfg, trainer, optimizer, num_training_steps): Creates and\nreturns a learning rate scheduler.\n- add_callbacks_pre_trainer(cfg, model): Adds callbacks to the trainer before\ntraining.\n- add_callbacks_post_trainer(cfg, trainer): Adds callbacks to the trainer after\ntraining.\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nadd_callbacks_post_trainer\nAdds callbacks to the trainer after creating the trainer. 
This is useful for\n\n\nadd_callbacks_pre_trainer\nSet up callbacks before creating the trainer.\n\n\ncreate_lr_scheduler\nCreates and returns a learning rate scheduler.\n\n\ncreate_optimizer\nCreates and returns an optimizer for training.\n\n\nget_input_args\nReturns a pydantic model for the plugin’s input arguments.\n\n\nget_trainer_cls\nReturns a custom class for the trainer.\n\n\nload_datasets\nLoads and preprocesses the dataset for training.\n\n\npost_lora_load\nPerforms actions after LoRA weights are loaded.\n\n\npost_model_build\nPerforms actions after the model is built/loaded, but before any adapters are applied.\n\n\npost_model_load\nPerforms actions after the model is loaded.\n\n\npost_train\nPerforms actions after training is complete.\n\n\npost_train_unload\nPerforms actions after training is complete and the model is unloaded.\n\n\npost_trainer_create\nPerforms actions after the trainer is created.\n\n\npre_lora_load\nPerforms actions before LoRA weights are loaded.\n\n\npre_model_load\nPerforms actions before the model is loaded.\n\n\nregister\nRegisters the plugin with the given configuration.\n\n\n\n\n\nintegrations.base.BasePlugin.add_callbacks_post_trainer(cfg, trainer)\nAdds callbacks to the trainer after creating the trainer. 
This is useful for\ncallbacks that require access to the model or trainer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[Callable]\nA list of callback functions to be added\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.add_callbacks_pre_trainer(cfg, model)\nSet up callbacks before creating the trainer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[Callable]\nA list of callback functions to be added to the TrainingArgs.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.create_lr_scheduler(\n cfg,\n trainer,\n optimizer,\n num_training_steps,\n)\nCreates and returns a learning rate scheduler.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\noptimizer\nOptimizer\nThe optimizer for training.\nrequired\n\n\nnum_training_steps\nint\nTotal number of training steps\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nLRScheduler | None\nThe created learning rate scheduler.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.create_optimizer(cfg, trainer)\nCreates and returns an optimizer for training.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nOptimizer | None\nThe created optimizer.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.get_input_args()\nReturns a pydantic model for the plugin’s input 
arguments.\n\n\n\nintegrations.base.BasePlugin.get_trainer_cls(cfg)\nReturns a custom class for the trainer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe global axolotl configuration.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTrainer | None\nThe first non-None trainer class returned by a plugin.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.load_datasets(cfg, preprocess=False)\nLoads and preprocesses the dataset for training.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\npreprocess\nbool\nWhether this is the preprocess step of the datasets.\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\ndataset_meta\nUnion['TrainDatasetMeta', None]\nThe metadata for the training dataset.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_lora_load(cfg, model)\nPerforms actions after LoRA weights are loaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_model_build(cfg, model)\nPerforms actions after the model is built/loaded, but before any adapters are applied.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_model_load(cfg, model)\nPerforms actions after the model is loaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_train(cfg, model)\nPerforms actions after training is complete.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe axolotl 
configuration.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_train_unload(cfg)\nPerforms actions after training is complete and the model is unloaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_trainer_create(cfg, trainer)\nPerforms actions after the trainer is created.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.pre_lora_load(cfg, model)\nPerforms actions before LoRA weights are loaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.pre_model_load(cfg)\nPerforms actions before the model is loaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.register(cfg)\nRegisters the plugin with the given configuration.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\n\n\n\n\n\n\n\nintegrations.base.PluginManager()\nThe PluginManager class is responsible for loading and managing plugins. 
It\nshould be a singleton so it can be accessed from anywhere in the codebase.\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\nplugins\nOrderedDict[str, BasePlugin]\nA list of loaded plugins.\n\n\n\n\n\n\nKey methods include:\n- get_instance(): Static method to get the singleton instance of PluginManager.\n- register(plugin_name: str): Registers a new plugin by its name.\n- pre_model_load(cfg): Calls the pre_model_load method of all registered plugins.\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nadd_callbacks_post_trainer\nCalls the add_callbacks_post_trainer method of all registered plugins.\n\n\nadd_callbacks_pre_trainer\nCalls the add_callbacks_pre_trainer method of all registered plugins.\n\n\ncreate_lr_scheduler\nCalls the create_lr_scheduler method of all registered plugins and returns\n\n\ncreate_optimizer\nCalls the create_optimizer method of all registered plugins and returns\n\n\nget_input_args\nReturns a list of Pydantic classes for all registered plugins’ input arguments.’\n\n\nget_instance\nReturns the singleton instance of PluginManager. 
If the instance doesn’t\n\n\nget_trainer_cls\nCalls the get_trainer_cls method of all registered plugins and returns the\n\n\nload_datasets\nCalls the load_datasets method of each registered plugin.\n\n\npost_lora_load\nCalls the post_lora_load method of all registered plugins.\n\n\npost_model_build\nCalls the post_model_build method of all registered plugins after the\n\n\npost_model_load\nCalls the post_model_load method of all registered plugins after the model\n\n\npost_train\nCalls the post_train method of all registered plugins.\n\n\npost_train_unload\nCalls the post_train_unload method of all registered plugins.\n\n\npost_trainer_create\nCalls the post_trainer_create method of all registered plugins.\n\n\npre_lora_load\nCalls the pre_lora_load method of all registered plugins.\n\n\npre_model_load\nCalls the pre_model_load method of all registered plugins.\n\n\nregister\nRegisters a new plugin by its name.\n\n\n\n\n\nintegrations.base.PluginManager.add_callbacks_post_trainer(cfg, trainer)\nCalls the add_callbacks_post_trainer method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[Callable]\nA list of callback functions to be added to the TrainingArgs.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.add_callbacks_pre_trainer(cfg, model)\nCalls the add_callbacks_pre_trainer method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[Callable]\nA list of callback functions to be added to the TrainingArgs.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.create_lr_scheduler(\n trainer,\n optimizer,\n 
num_training_steps,\n)\nCalls the create_lr_scheduler method of all registered plugins and returns\nthe first non-None scheduler.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\noptimizer\nOptimizer\nThe optimizer for training.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nLRScheduler | None\nThe created learning rate scheduler, or None if not found.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.create_optimizer(trainer)\nCalls the create_optimizer method of all registered plugins and returns\nthe first non-None optimizer.\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nOptimizer | None\nThe created optimizer, or None if none was found.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.get_input_args()\nReturns a list of Pydantic classes for all registered plugins’ input arguments.’\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[str]\nA list of Pydantic classes for all registered plugins’ input arguments.’\n\n\n\n\n\n\n\nintegrations.base.PluginManager.get_instance()\nReturns the singleton instance of PluginManager. 
If the instance doesn’t\nexist, it creates a new one.\n\n\n\nintegrations.base.PluginManager.get_trainer_cls(cfg)\nCalls the get_trainer_cls method of all registered plugins and returns the\nfirst non-None trainer class.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTrainer | None\nThe first non-None trainer class returned by a plugin.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.load_datasets(cfg, preprocess=False)\nCalls the load_datasets method of each registered plugin.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\npreprocess\nbool\nWhether this is preprocess step of the datasets.\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nUnion['TrainDatasetMeta', None]\nThe dataset metadata loaded from all registered plugins.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_lora_load(cfg, model)\nCalls the post_lora_load method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_model_build(cfg, model)\nCalls the post_model_build method of all registered plugins after the\nmodel has been built / loaded, but before any adapters have been applied.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_model_load(cfg, model)\nCalls the post_model_load method of all registered plugins after the model\nhas been loaded inclusive of any 
adapters.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_train(cfg, model)\nCalls the post_train method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_train_unload(cfg)\nCalls the post_train_unload method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_trainer_create(cfg, trainer)\nCalls the post_trainer_create method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.pre_lora_load(cfg, model)\nCalls the pre_lora_load method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.pre_model_load(cfg)\nCalls the pre_model_load method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.register(plugin_name)\nRegisters a new plugin by its name.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nplugin_name\nstr\nThe name of the plugin to be 
registered.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nImportError\nIf the plugin module cannot be imported.\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nload_plugin\nLoads a plugin based on the given plugin name.\n\n\n\n\n\nintegrations.base.load_plugin(plugin_name)\nLoads a plugin based on the given plugin name.\nThe plugin name should be in the format “module_name.class_name”. This function\nsplits the plugin name into module and class, imports the module, retrieves the\nclass from the module, and creates an instance of the class.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nplugin_name\nstr\nThe name of the plugin to be loaded. The name should be in the format “module_name.class_name”.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nBasePlugin\nAn instance of the loaded plugin.\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nImportError\nIf the plugin module cannot be imported." }, { "objectID": "docs/api/integrations.base.html#classes", "href": "docs/api/integrations.base.html#classes", "title": "integrations.base", "section": "", - "text": "Name\nDescription\n\n\n\n\nBaseOptimizerFactory\nBase class for factories to create custom optimizers\n\n\nBasePlugin\nBase class for all plugins. Defines the interface for plugin methods.\n\n\nPluginManager\nThe PluginManager class is responsible for loading and managing plugins. It\n\n\n\n\n\nintegrations.base.BaseOptimizerFactory()\nBase class for factories to create custom optimizers\n\n\n\nintegrations.base.BasePlugin(self)\nBase class for all plugins. Defines the interface for plugin methods.\nA plugin is a reusable, modular, and self-contained piece of code that extends\nthe functionality of Axolotl. 
Plugins can be used to integrate third-party models,\nmodify the training process, or add new features.\nTo create a new plugin, you need to inherit from the BasePlugin class and\nimplement the required methods.\n\n\nPlugin methods include:\n- register(cfg): Registers the plugin with the given configuration.\n- load_datasets(cfg): Loads and preprocesses the dataset for training.\n- pre_model_load(cfg): Performs actions before the model is loaded.\n- post_model_build(cfg, model): Performs actions after the model is loaded, but\nbefore LoRA adapters are applied.\n- pre_lora_load(cfg, model): Performs actions before LoRA weights are loaded.\n- post_lora_load(cfg, model): Performs actions after LoRA weights are loaded.\n- post_model_load(cfg, model): Performs actions after the model is loaded,\ninclusive of any adapters.\n- post_trainer_create(cfg, trainer): Performs actions after the trainer is\ncreated.\n- create_optimizer(cfg, trainer): Creates and returns an optimizer for training.\n- create_lr_scheduler(cfg, trainer, optimizer, num_training_steps): Creates and\nreturns a learning rate scheduler.\n- add_callbacks_pre_trainer(cfg, model): Adds callbacks to the trainer before\ntraining.\n- add_callbacks_post_trainer(cfg, trainer): Adds callbacks to the trainer after\ntraining.\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nadd_callbacks_post_trainer\nAdds callbacks to the trainer after creating the trainer. 
This is useful for\n\n\nadd_callbacks_pre_trainer\nSet up callbacks before creating the trainer.\n\n\ncreate_lr_scheduler\nCreates and returns a learning rate scheduler.\n\n\ncreate_optimizer\nCreates and returns an optimizer for training.\n\n\nget_input_args\nReturns a pydantic model for the plugin’s input arguments.\n\n\nget_trainer_cls\nReturns a custom class for the trainer.\n\n\nload_datasets\nLoads and preprocesses the dataset for training.\n\n\npost_lora_load\nPerforms actions after LoRA weights are loaded.\n\n\npost_model_build\nPerforms actions after the model is built/loaded, but before any adapters are applied.\n\n\npost_model_load\nPerforms actions after the model is loaded.\n\n\npost_train\nPerforms actions after training is complete.\n\n\npost_train_unload\nPerforms actions after training is complete and the model is unloaded.\n\n\npost_trainer_create\nPerforms actions after the trainer is created.\n\n\npre_lora_load\nPerforms actions before LoRA weights are loaded.\n\n\npre_model_load\nPerforms actions before the model is loaded.\n\n\nregister\nRegisters the plugin with the given configuration.\n\n\n\n\n\nintegrations.base.BasePlugin.add_callbacks_post_trainer(cfg, trainer)\nAdds callbacks to the trainer after creating the trainer. 
This is useful for\ncallbacks that require access to the model or trainer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[Callable]\nA list of callback functions to be added\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.add_callbacks_pre_trainer(cfg, model)\nSet up callbacks before creating the trainer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[Callable]\nA list of callback functions to be added to the TrainingArgs.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.create_lr_scheduler(\n cfg,\n trainer,\n optimizer,\n num_training_steps,\n)\nCreates and returns a learning rate scheduler.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\noptimizer\nOptimizer\nThe optimizer for training.\nrequired\n\n\nnum_training_steps\nint\nTotal number of training steps\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nLRScheduler | None\nThe created learning rate scheduler.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.create_optimizer(cfg, trainer)\nCreates and returns an optimizer for training.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nOptimizer | None\nThe created optimizer.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.get_input_args()\nReturns a pydantic model for the plugin’s input 
arguments.\n\n\n\nintegrations.base.BasePlugin.get_trainer_cls(cfg)\nReturns a custom class for the trainer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe global axolotl configuration.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTrainer | None\nThe first non-None trainer class returned by a plugin.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.load_datasets(cfg, preprocess=False)\nLoads and preprocesses the dataset for training.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\npreprocess\nbool\nWhether this is the preprocess step of the datasets.\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\ndataset_meta\nUnion['TrainDatasetMeta', None]\nThe metadata for the training dataset.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_lora_load(cfg, model)\nPerforms actions after LoRA weights are loaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_model_build(cfg, model)\nPerforms actions after the model is built/loaded, but before any adapters are applied.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_model_load(cfg, model)\nPerforms actions after the model is loaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_train(cfg, model)\nPerforms actions after training is complete.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe axolotl 
configuration.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_train_unload(cfg)\nPerforms actions after training is complete and the model is unloaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_trainer_create(cfg, trainer)\nPerforms actions after the trainer is created.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.pre_lora_load(cfg, model)\nPerforms actions before LoRA weights are loaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.pre_model_load(cfg)\nPerforms actions before the model is loaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.register(cfg)\nRegisters the plugin with the given configuration.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\n\n\n\n\n\n\n\nintegrations.base.PluginManager()\nThe PluginManager class is responsible for loading and managing plugins. 
It\nshould be a singleton so it can be accessed from anywhere in the codebase.\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\nplugins\nOrderedDict[str, BasePlugin]\nA list of loaded plugins.\n\n\n\n\n\n\nKey methods include:\n- get_instance(): Static method to get the singleton instance of PluginManager.\n- register(plugin_name: str): Registers a new plugin by its name.\n- pre_model_load(cfg): Calls the pre_model_load method of all registered plugins.\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nadd_callbacks_post_trainer\nCalls the add_callbacks_post_trainer method of all registered plugins.\n\n\nadd_callbacks_pre_trainer\nCalls the add_callbacks_pre_trainer method of all registered plugins.\n\n\ncreate_lr_scheduler\nCalls the create_lr_scheduler method of all registered plugins and returns\n\n\ncreate_optimizer\nCalls the create_optimizer method of all registered plugins and returns\n\n\nget_input_args\nReturns a list of Pydantic classes for all registered plugins’ input arguments.’\n\n\nget_instance\nReturns the singleton instance of PluginManager. 
If the instance doesn’t\n\n\nget_trainer_cls\nCalls the get_trainer_cls method of all registered plugins and returns the\n\n\nload_datasets\nCalls the load_datasets method of each registered plugin.\n\n\npost_lora_load\nCalls the post_lora_load method of all registered plugins.\n\n\npost_model_build\nCalls the post_model_build method of all registered plugins after the\n\n\npost_model_load\nCalls the post_model_load method of all registered plugins after the model\n\n\npost_train\nCalls the post_train method of all registered plugins.\n\n\npost_train_unload\nCalls the post_train_unload method of all registered plugins.\n\n\npost_trainer_create\nCalls the post_trainer_create method of all registered plugins.\n\n\npre_lora_load\nCalls the pre_lora_load method of all registered plugins.\n\n\npre_model_load\nCalls the pre_model_load method of all registered plugins.\n\n\nregister\nRegisters a new plugin by its name.\n\n\n\n\n\nintegrations.base.PluginManager.add_callbacks_post_trainer(cfg, trainer)\nCalls the add_callbacks_post_trainer method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[Callable]\nA list of callback functions to be added to the TrainingArgs.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.add_callbacks_pre_trainer(cfg, model)\nCalls the add_callbacks_pre_trainer method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[Callable]\nA list of callback functions to be added to the TrainingArgs.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.create_lr_scheduler(\n trainer,\n optimizer,\n 
num_training_steps,\n)\nCalls the create_lr_scheduler method of all registered plugins and returns\nthe first non-None scheduler.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\noptimizer\nOptimizer\nThe optimizer for training.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nLRScheduler | None\nThe created learning rate scheduler, or None if not found.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.create_optimizer(trainer)\nCalls the create_optimizer method of all registered plugins and returns\nthe first non-None optimizer.\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nOptimizer | None\nThe created optimizer, or None if none was found.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.get_input_args()\nReturns a list of Pydantic classes for all registered plugins’ input arguments.’\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[str]\nA list of Pydantic classes for all registered plugins’ input arguments.’\n\n\n\n\n\n\n\nintegrations.base.PluginManager.get_instance()\nReturns the singleton instance of PluginManager. 
If the instance doesn’t\nexist, it creates a new one.\n\n\n\nintegrations.base.PluginManager.get_trainer_cls(cfg)\nCalls the get_trainer_cls method of all registered plugins and returns the\nfirst non-None trainer class.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTrainer | None\nThe first non-None trainer class returned by a plugin.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.load_datasets(cfg, preprocess=False)\nCalls the load_datasets method of each registered plugin.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\npreprocess\nbool\nWhether this is preprocess step of the datasets.\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nUnion['TrainDatasetMeta', None]\nThe dataset metadata loaded from all registered plugins.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_lora_load(cfg, model)\nCalls the post_lora_load method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_model_build(cfg, model)\nCalls the post_model_build method of all registered plugins after the\nmodel has been built / loaded, but before any adapters have been applied.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_model_load(cfg, model)\nCalls the post_model_load method of all registered plugins after the model\nhas been loaded inclusive of any 
adapters.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_train(cfg, model)\nCalls the post_train method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_train_unload(cfg)\nCalls the post_train_unload method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_trainer_create(cfg, trainer)\nCalls the post_trainer_create method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.pre_lora_load(cfg, model)\nCalls the pre_lora_load method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.pre_model_load(cfg)\nCalls the pre_model_load method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.register(plugin_name)\nRegisters a new plugin by its name.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nplugin_name\nstr\nThe name of the plugin to be 
registered.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nImportError\nIf the plugin module cannot be imported." + "text": "Name\nDescription\n\n\n\n\nBaseOptimizerFactory\nBase class for factories to create custom optimizers\n\n\nBasePlugin\nBase class for all plugins. Defines the interface for plugin methods.\n\n\nPluginManager\nThe PluginManager class is responsible for loading and managing plugins. It\n\n\n\n\n\nintegrations.base.BaseOptimizerFactory()\nBase class for factories to create custom optimizers\n\n\n\nintegrations.base.BasePlugin()\nBase class for all plugins. Defines the interface for plugin methods.\nA plugin is a reusable, modular, and self-contained piece of code that extends\nthe functionality of Axolotl. Plugins can be used to integrate third-party models,\nmodify the training process, or add new features.\nTo create a new plugin, you need to inherit from the BasePlugin class and\nimplement the required methods.\n\n\nPlugin methods include:\n- register(cfg): Registers the plugin with the given configuration.\n- load_datasets(cfg): Loads and preprocesses the dataset for training.\n- pre_model_load(cfg): Performs actions before the model is loaded.\n- post_model_build(cfg, model): Performs actions after the model is loaded, but\nbefore LoRA adapters are applied.\n- pre_lora_load(cfg, model): Performs actions before LoRA weights are loaded.\n- post_lora_load(cfg, model): Performs actions after LoRA weights are loaded.\n- post_model_load(cfg, model): Performs actions after the model is loaded,\ninclusive of any adapters.\n- post_trainer_create(cfg, trainer): Performs actions after the trainer is\ncreated.\n- create_optimizer(cfg, trainer): Creates and returns an optimizer for training.\n- create_lr_scheduler(cfg, trainer, optimizer, num_training_steps): Creates and\nreturns a learning rate scheduler.\n- add_callbacks_pre_trainer(cfg, model): Adds callbacks to the trainer before\ntraining.\n- add_callbacks_post_trainer(cfg, 
trainer): Adds callbacks to the trainer after\ntraining.\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nadd_callbacks_post_trainer\nAdds callbacks to the trainer after creating the trainer. This is useful for\n\n\nadd_callbacks_pre_trainer\nSet up callbacks before creating the trainer.\n\n\ncreate_lr_scheduler\nCreates and returns a learning rate scheduler.\n\n\ncreate_optimizer\nCreates and returns an optimizer for training.\n\n\nget_input_args\nReturns a pydantic model for the plugin’s input arguments.\n\n\nget_trainer_cls\nReturns a custom class for the trainer.\n\n\nload_datasets\nLoads and preprocesses the dataset for training.\n\n\npost_lora_load\nPerforms actions after LoRA weights are loaded.\n\n\npost_model_build\nPerforms actions after the model is built/loaded, but before any adapters are applied.\n\n\npost_model_load\nPerforms actions after the model is loaded.\n\n\npost_train\nPerforms actions after training is complete.\n\n\npost_train_unload\nPerforms actions after training is complete and the model is unloaded.\n\n\npost_trainer_create\nPerforms actions after the trainer is created.\n\n\npre_lora_load\nPerforms actions before LoRA weights are loaded.\n\n\npre_model_load\nPerforms actions before the model is loaded.\n\n\nregister\nRegisters the plugin with the given configuration.\n\n\n\n\n\nintegrations.base.BasePlugin.add_callbacks_post_trainer(cfg, trainer)\nAdds callbacks to the trainer after creating the trainer. 
This is useful for\ncallbacks that require access to the model or trainer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[Callable]\nA list of callback functions to be added\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.add_callbacks_pre_trainer(cfg, model)\nSet up callbacks before creating the trainer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[Callable]\nA list of callback functions to be added to the TrainingArgs.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.create_lr_scheduler(\n cfg,\n trainer,\n optimizer,\n num_training_steps,\n)\nCreates and returns a learning rate scheduler.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\noptimizer\nOptimizer\nThe optimizer for training.\nrequired\n\n\nnum_training_steps\nint\nTotal number of training steps\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nLRScheduler | None\nThe created learning rate scheduler.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.create_optimizer(cfg, trainer)\nCreates and returns an optimizer for training.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nOptimizer | None\nThe created optimizer.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.get_input_args()\nReturns a pydantic model for the plugin’s input 
arguments.\n\n\n\nintegrations.base.BasePlugin.get_trainer_cls(cfg)\nReturns a custom class for the trainer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe global axolotl configuration.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTrainer | None\nThe first non-None trainer class returned by a plugin.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.load_datasets(cfg, preprocess=False)\nLoads and preprocesses the dataset for training.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\npreprocess\nbool\nWhether this is the preprocess step of the datasets.\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\ndataset_meta\nUnion['TrainDatasetMeta', None]\nThe metadata for the training dataset.\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_lora_load(cfg, model)\nPerforms actions after LoRA weights are loaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_model_build(cfg, model)\nPerforms actions after the model is built/loaded, but before any adapters are applied.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_model_load(cfg, model)\nPerforms actions after the model is loaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_train(cfg, model)\nPerforms actions after training is complete.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe axolotl 
configuration.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_train_unload(cfg)\nPerforms actions after training is complete and the model is unloaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.post_trainer_create(cfg, trainer)\nPerforms actions after the trainer is created.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.pre_lora_load(cfg, model)\nPerforms actions before LoRA weights are loaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.pre_model_load(cfg)\nPerforms actions before the model is loaded.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\n\n\n\n\n\nintegrations.base.BasePlugin.register(cfg)\nRegisters the plugin with the given configuration.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugin.\nrequired\n\n\n\n\n\n\n\n\n\nintegrations.base.PluginManager()\nThe PluginManager class is responsible for loading and managing plugins. 
It\nshould be a singleton so it can be accessed from anywhere in the codebase.\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\nplugins\nOrderedDict[str, BasePlugin]\nA list of loaded plugins.\n\n\n\n\n\n\nKey methods include:\n- get_instance(): Static method to get the singleton instance of PluginManager.\n- register(plugin_name: str): Registers a new plugin by its name.\n- pre_model_load(cfg): Calls the pre_model_load method of all registered plugins.\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nadd_callbacks_post_trainer\nCalls the add_callbacks_post_trainer method of all registered plugins.\n\n\nadd_callbacks_pre_trainer\nCalls the add_callbacks_pre_trainer method of all registered plugins.\n\n\ncreate_lr_scheduler\nCalls the create_lr_scheduler method of all registered plugins and returns\n\n\ncreate_optimizer\nCalls the create_optimizer method of all registered plugins and returns\n\n\nget_input_args\nReturns a list of Pydantic classes for all registered plugins’ input arguments.’\n\n\nget_instance\nReturns the singleton instance of PluginManager. 
If the instance doesn’t\n\n\nget_trainer_cls\nCalls the get_trainer_cls method of all registered plugins and returns the\n\n\nload_datasets\nCalls the load_datasets method of each registered plugin.\n\n\npost_lora_load\nCalls the post_lora_load method of all registered plugins.\n\n\npost_model_build\nCalls the post_model_build method of all registered plugins after the\n\n\npost_model_load\nCalls the post_model_load method of all registered plugins after the model\n\n\npost_train\nCalls the post_train method of all registered plugins.\n\n\npost_train_unload\nCalls the post_train_unload method of all registered plugins.\n\n\npost_trainer_create\nCalls the post_trainer_create method of all registered plugins.\n\n\npre_lora_load\nCalls the pre_lora_load method of all registered plugins.\n\n\npre_model_load\nCalls the pre_model_load method of all registered plugins.\n\n\nregister\nRegisters a new plugin by its name.\n\n\n\n\n\nintegrations.base.PluginManager.add_callbacks_post_trainer(cfg, trainer)\nCalls the add_callbacks_post_trainer method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[Callable]\nA list of callback functions to be added to the TrainingArgs.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.add_callbacks_pre_trainer(cfg, model)\nCalls the add_callbacks_pre_trainer method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[Callable]\nA list of callback functions to be added to the TrainingArgs.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.create_lr_scheduler(\n trainer,\n optimizer,\n 
num_training_steps,\n)\nCalls the create_lr_scheduler method of all registered plugins and returns\nthe first non-None scheduler.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\noptimizer\nOptimizer\nThe optimizer for training.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nLRScheduler | None\nThe created learning rate scheduler, or None if not found.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.create_optimizer(trainer)\nCalls the create_optimizer method of all registered plugins and returns\nthe first non-None optimizer.\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nOptimizer | None\nThe created optimizer, or None if none was found.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.get_input_args()\nReturns a list of Pydantic classes for all registered plugins’ input arguments.’\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[str]\nA list of Pydantic classes for all registered plugins’ input arguments.’\n\n\n\n\n\n\n\nintegrations.base.PluginManager.get_instance()\nReturns the singleton instance of PluginManager. 
If the instance doesn’t\nexist, it creates a new one.\n\n\n\nintegrations.base.PluginManager.get_trainer_cls(cfg)\nCalls the get_trainer_cls method of all registered plugins and returns the\nfirst non-None trainer class.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTrainer | None\nThe first non-None trainer class returned by a plugin.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.load_datasets(cfg, preprocess=False)\nCalls the load_datasets method of each registered plugin.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\npreprocess\nbool\nWhether this is preprocess step of the datasets.\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nUnion['TrainDatasetMeta', None]\nThe dataset metadata loaded from all registered plugins.\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_lora_load(cfg, model)\nCalls the post_lora_load method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_model_build(cfg, model)\nCalls the post_model_build method of all registered plugins after the\nmodel has been built / loaded, but before any adapters have been applied.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_model_load(cfg, model)\nCalls the post_model_load method of all registered plugins after the model\nhas been loaded inclusive of any 
adapters.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_train(cfg, model)\nCalls the post_train method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel | PeftModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_train_unload(cfg)\nCalls the post_train_unload method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.post_trainer_create(cfg, trainer)\nCalls the post_trainer_create method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object for training.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.pre_lora_load(cfg, model)\nCalls the pre_lora_load method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\nmodel\nPreTrainedModel\nThe loaded model.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.pre_model_load(cfg)\nCalls the pre_model_load method of all registered plugins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration for the plugins.\nrequired\n\n\n\n\n\n\n\nintegrations.base.PluginManager.register(plugin_name)\nRegisters a new plugin by its name.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nplugin_name\nstr\nThe name of the plugin to be 
registered.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nImportError\nIf the plugin module cannot be imported." }, { "objectID": "docs/api/integrations.base.html#functions", @@ -1136,14 +1136,14 @@ "href": "docs/api/utils.callbacks.perplexity.html", "title": "utils.callbacks.perplexity", "section": "", - "text": "utils.callbacks.perplexity\ncallback to calculate perplexity as an evaluation metric.\n\n\n\n\n\nName\nDescription\n\n\n\n\nPerplexity\nCalculate perplexity as defined in https://huggingface.co/docs/transformers/en/perplexity.\n\n\n\n\n\nutils.callbacks.perplexity.Perplexity(self, tokenizer, max_seq_len, stride=512)\nCalculate perplexity as defined in https://huggingface.co/docs/transformers/en/perplexity.\nThis is a custom variant that doesn’t re-tokenize the input or re-load the model.\n\n\n\n\n\nName\nDescription\n\n\n\n\ncompute\nCompute perplexity in a fixed length sliding window across the sequence.\n\n\n\n\n\nutils.callbacks.perplexity.Perplexity.compute(model, references=None)\nCompute perplexity in a fixed length sliding window across the sequence." + "text": "utils.callbacks.perplexity\ncallback to calculate perplexity as an evaluation metric.\n\n\n\n\n\nName\nDescription\n\n\n\n\nPerplexity\nCalculate perplexity as defined in https://huggingface.co/docs/transformers/en/perplexity.\n\n\n\n\n\nutils.callbacks.perplexity.Perplexity(tokenizer, max_seq_len, stride=512)\nCalculate perplexity as defined in https://huggingface.co/docs/transformers/en/perplexity.\nThis is a custom variant that doesn’t re-tokenize the input or re-load the model.\n\n\n\n\n\nName\nDescription\n\n\n\n\ncompute\nCompute perplexity in a fixed length sliding window across the sequence.\n\n\n\n\n\nutils.callbacks.perplexity.Perplexity.compute(model, references=None)\nCompute perplexity in a fixed length sliding window across the sequence." 
}, { "objectID": "docs/api/utils.callbacks.perplexity.html#classes", "href": "docs/api/utils.callbacks.perplexity.html#classes", "title": "utils.callbacks.perplexity", "section": "", - "text": "Name\nDescription\n\n\n\n\nPerplexity\nCalculate perplexity as defined in https://huggingface.co/docs/transformers/en/perplexity.\n\n\n\n\n\nutils.callbacks.perplexity.Perplexity(self, tokenizer, max_seq_len, stride=512)\nCalculate perplexity as defined in https://huggingface.co/docs/transformers/en/perplexity.\nThis is a custom variant that doesn’t re-tokenize the input or re-load the model.\n\n\n\n\n\nName\nDescription\n\n\n\n\ncompute\nCompute perplexity in a fixed length sliding window across the sequence.\n\n\n\n\n\nutils.callbacks.perplexity.Perplexity.compute(model, references=None)\nCompute perplexity in a fixed length sliding window across the sequence." + "text": "Name\nDescription\n\n\n\n\nPerplexity\nCalculate perplexity as defined in https://huggingface.co/docs/transformers/en/perplexity.\n\n\n\n\n\nutils.callbacks.perplexity.Perplexity(tokenizer, max_seq_len, stride=512)\nCalculate perplexity as defined in https://huggingface.co/docs/transformers/en/perplexity.\nThis is a custom variant that doesn’t re-tokenize the input or re-load the model.\n\n\n\n\n\nName\nDescription\n\n\n\n\ncompute\nCompute perplexity in a fixed length sliding window across the sequence.\n\n\n\n\n\nutils.callbacks.perplexity.Perplexity.compute(model, references=None)\nCompute perplexity in a fixed length sliding window across the sequence." 
}, { "objectID": "docs/api/utils.schemas.training.html", @@ -1171,14 +1171,14 @@ "href": "docs/api/utils.samplers.multipack.html", "title": "utils.samplers.multipack", "section": "", - "text": "utils.samplers.multipack\nMultipack Batch Sampler - An efficient batch sampler for packing variable-length sequences\ninto fixed-capacity batches to optimize memory usage and training throughput.\n\n\n\n\n\nName\nDescription\n\n\n\n\nMultipackBatchSampler\nBatch sampler class for efficient packing of variable-length sequences\n\n\n\n\n\nutils.samplers.multipack.MultipackBatchSampler(\n self,\n sampler,\n batch_size,\n batch_max_len,\n lengths,\n packing_efficiency_estimate=1.0,\n drop_last=False,\n num_count_samples=16,\n sequential=False,\n group_size=100000,\n bin_size=200,\n num_processes=None,\n safe_mode=True,\n **kwargs,\n)\nBatch sampler class for efficient packing of variable-length sequences\nThis sampler packs sequences into fixed-capacity bins (batches) to maximize\nGPU memory utilization and training throughput by reducing padding.\nIt supports both parallel packing (using FFD algorithm) and\nsequential packing (preserving original sequence order).\n\n\n\n\n\nName\nDescription\n\n\n\n\nefficiency\nCalculate the packing efficiency (ratio of tokens used to total token slots).\n\n\ngather_efficiency\nGather and synchronize packing efficiency estimates across all distributed\n\n\ngather_len_batches\nGather and synchronize batch counts across all distributed ranks. 
Returns\n\n\ngenerate_batches\nGenerate packed batches for training.\n\n\nset_epoch\nSet the epoch number, used for reproducible shuffling across epochs\n\n\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.efficiency()\nCalculate the packing efficiency (ratio of tokens used to total token slots).\nHigher is better - 1.0 would mean perfect packing with no wasted space.\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.gather_efficiency()\nGather and synchronize packing efficiency estimates across all distributed\nranks.\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nfloat\nA conservative efficiency estimate based on the measurements.\n\n\n\n\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.gather_len_batches(num)\nGather and synchronize batch counts across all distributed ranks. Returns\nthe minimum number of batches available on any rank.\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.generate_batches(set_stats=False)\nGenerate packed batches for training.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nset_stats\nbool\nWhether to update efficiency statistics.\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[list[list[int]]]\nList of batches, where each batch contains multiple bins, and each bin contains multiple sequence indices.\n\n\n\n\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.set_epoch(epoch)\nSet the epoch number, used for reproducible shuffling across epochs\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nallocate_sequentially\nSequential allocator that preserves example order.\n\n\nffd_check\nFirst-fit-decreasing bin packing algorithm check.\n\n\npack_group\nPack a group of sequences into bins using First-Fit Decreasing algorithm.\n\n\npack_parallel\nPack sequences into bins using parallel processing.\n\n\n\n\n\nutils.samplers.multipack.allocate_sequentially(\n sequence_lengths,\n rank,\n bin_capacity,\n num_ranks,\n)\nSequential allocator that preserves example 
order.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nsequence_lengths\nnp.ndarray\nThe lengths of all examples.\nrequired\n\n\nrank\nint\nThe current rank (for distributed training).\nrequired\n\n\nbin_capacity\nint\nThe capacity of each bin (maximum sequence length).\nrequired\n\n\nnum_ranks\nint\nNumber of ranks (processes / GPUs).\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\nrank_batches\nlist[list[int]]\nList of batches for the current rank.\n\n\ntotal_tokens_used\nint\nNumber of actual example tokens.\n\n\ntotal_token_slots\nint\nMaximum theoretical number of example tokens (number of bins * bin capacity).\n\n\n\n\n\n\n\nutils.samplers.multipack.ffd_check(sequence_lengths, bin_capacity, num_bins)\nFirst-fit-decreasing bin packing algorithm check.\nChecks if sequences with the given lengths could fit in the specified number of\nbins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nsequence_lengths\nnp.ndarray\nArray of sequence lengths.\nrequired\n\n\nbin_capacity\nint\nMaximum capacity of each bin.\nrequired\n\n\nnum_bins\nint\nNumber of bins available.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nbool\nTrue if all sequences can be packed, False otherwise.\n\n\n\n\n\n\n\nutils.samplers.multipack.pack_group(\n sequence_lengths,\n group_offset,\n bin_capacity,\n max_bins,\n bin_size,\n safe_mode=True,\n)\nPack a group of sequences into bins using First-Fit Decreasing algorithm.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nsequence_lengths\nnp.ndarray\nArray of sequence lengths.\nrequired\n\n\ngroup_offset\nint\nOffset to apply to indices when returning results.\nrequired\n\n\nbin_capacity\nint\nMaximum capacity of each bin.\nrequired\n\n\nmax_bins\nint\nMaximum number of bins to use.\nrequired\n\n\nbin_size\nint\nMaximum number of sequences per bin.\nrequired\n\n\nsafe_mode\nbool\nIf True, use a more conservative packing 
approach.\nTrue\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[list[int]]\nList of bins, where each bin contains indices of sequences assigned to it.\n\n\n\n\n\n\n\nutils.samplers.multipack.pack_parallel(\n sequence_lengths,\n bin_capacity,\n group_size,\n bin_size,\n num_processes=None,\n safe_mode=True,\n mp_start_method='spawn',\n)\nPack sequences into bins using parallel processing.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nsequence_lengths\nnp.ndarray\nArray of sequence lengths.\nrequired\n\n\nbin_capacity\nint\nMaximum capacity of each bin as total number of tokens.\nrequired\n\n\ngroup_size\nint\nNumber of sequences to process in each group.\nrequired\n\n\nbin_size\nint\nMaximum number of bins to use.\nrequired\n\n\nnum_processes\nint | None\nNumber of parallel processes to use.\nNone\n\n\nsafe_mode\nbool\nIf True, use a more conservative packing approach.\nTrue\n\n\nmp_start_method\nstr | None\nMultiprocessing start method (‘fork’, ‘spawn’, ‘forkserver’). ‘spawn’ is often safer with Numba/PyTorch. Set to None to use system default.\n'spawn'\n\n\n\nReturns:\nList of bins, where each bin contains indices of sequences assigned to it." 
+ "text": "utils.samplers.multipack\nMultipack Batch Sampler - An efficient batch sampler for packing variable-length sequences\ninto fixed-capacity batches to optimize memory usage and training throughput.\n\n\n\n\n\nName\nDescription\n\n\n\n\nMultipackBatchSampler\nBatch sampler class for efficient packing of variable-length sequences\n\n\n\n\n\nutils.samplers.multipack.MultipackBatchSampler(\n sampler,\n batch_size,\n batch_max_len,\n lengths,\n packing_efficiency_estimate=1.0,\n drop_last=False,\n num_count_samples=16,\n sequential=False,\n group_size=100000,\n bin_size=200,\n num_processes=None,\n safe_mode=True,\n **kwargs,\n)\nBatch sampler class for efficient packing of variable-length sequences\nThis sampler packs sequences into fixed-capacity bins (batches) to maximize\nGPU memory utilization and training throughput by reducing padding.\nIt supports both parallel packing (using FFD algorithm) and\nsequential packing (preserving original sequence order).\n\n\n\n\n\nName\nDescription\n\n\n\n\nefficiency\nCalculate the packing efficiency (ratio of tokens used to total token slots).\n\n\ngather_efficiency\nGather and synchronize packing efficiency estimates across all distributed\n\n\ngather_len_batches\nGather and synchronize batch counts across all distributed ranks. 
Returns\n\n\ngenerate_batches\nGenerate packed batches for training.\n\n\nset_epoch\nSet the epoch number, used for reproducible shuffling across epochs\n\n\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.efficiency()\nCalculate the packing efficiency (ratio of tokens used to total token slots).\nHigher is better - 1.0 would mean perfect packing with no wasted space.\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.gather_efficiency()\nGather and synchronize packing efficiency estimates across all distributed\nranks.\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nfloat\nA conservative efficiency estimate based on the measurements.\n\n\n\n\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.gather_len_batches(num)\nGather and synchronize batch counts across all distributed ranks. Returns\nthe minimum number of batches available on any rank.\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.generate_batches(set_stats=False)\nGenerate packed batches for training.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nset_stats\nbool\nWhether to update efficiency statistics.\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[list[list[int]]]\nList of batches, where each batch contains multiple bins, and each bin contains multiple sequence indices.\n\n\n\n\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.set_epoch(epoch)\nSet the epoch number, used for reproducible shuffling across epochs\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nallocate_sequentially\nSequential allocator that preserves example order.\n\n\nffd_check\nFirst-fit-decreasing bin packing algorithm check.\n\n\npack_group\nPack a group of sequences into bins using First-Fit Decreasing algorithm.\n\n\npack_parallel\nPack sequences into bins using parallel processing.\n\n\n\n\n\nutils.samplers.multipack.allocate_sequentially(\n sequence_lengths,\n rank,\n bin_capacity,\n num_ranks,\n)\nSequential allocator that preserves example 
order.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nsequence_lengths\nnp.ndarray\nThe lengths of all examples.\nrequired\n\n\nrank\nint\nThe current rank (for distributed training).\nrequired\n\n\nbin_capacity\nint\nThe capacity of each bin (maximum sequence length).\nrequired\n\n\nnum_ranks\nint\nNumber of ranks (processes / GPUs).\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\nrank_batches\nlist[list[int]]\nList of batches for the current rank.\n\n\ntotal_tokens_used\nint\nNumber of actual example tokens.\n\n\ntotal_token_slots\nint\nMaximum theoretical number of example tokens (number of bins * bin capacity).\n\n\n\n\n\n\n\nutils.samplers.multipack.ffd_check(sequence_lengths, bin_capacity, num_bins)\nFirst-fit-decreasing bin packing algorithm check.\nChecks if sequences with the given lengths could fit in the specified number of\nbins.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nsequence_lengths\nnp.ndarray\nArray of sequence lengths.\nrequired\n\n\nbin_capacity\nint\nMaximum capacity of each bin.\nrequired\n\n\nnum_bins\nint\nNumber of bins available.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nbool\nTrue if all sequences can be packed, False otherwise.\n\n\n\n\n\n\n\nutils.samplers.multipack.pack_group(\n sequence_lengths,\n group_offset,\n bin_capacity,\n max_bins,\n bin_size,\n safe_mode=True,\n)\nPack a group of sequences into bins using First-Fit Decreasing algorithm.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nsequence_lengths\nnp.ndarray\nArray of sequence lengths.\nrequired\n\n\ngroup_offset\nint\nOffset to apply to indices when returning results.\nrequired\n\n\nbin_capacity\nint\nMaximum capacity of each bin.\nrequired\n\n\nmax_bins\nint\nMaximum number of bins to use.\nrequired\n\n\nbin_size\nint\nMaximum number of sequences per bin.\nrequired\n\n\nsafe_mode\nbool\nIf True, use a more conservative packing 
approach.\nTrue\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[list[int]]\nList of bins, where each bin contains indices of sequences assigned to it.\n\n\n\n\n\n\n\nutils.samplers.multipack.pack_parallel(\n sequence_lengths,\n bin_capacity,\n group_size,\n bin_size,\n num_processes=None,\n safe_mode=True,\n mp_start_method='spawn',\n)\nPack sequences into bins using parallel processing.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nsequence_lengths\nnp.ndarray\nArray of sequence lengths.\nrequired\n\n\nbin_capacity\nint\nMaximum capacity of each bin as total number of tokens.\nrequired\n\n\ngroup_size\nint\nNumber of sequences to process in each group.\nrequired\n\n\nbin_size\nint\nMaximum number of bins to use.\nrequired\n\n\nnum_processes\nint | None\nNumber of parallel processes to use.\nNone\n\n\nsafe_mode\nbool\nIf True, use a more conservative packing approach.\nTrue\n\n\nmp_start_method\nstr | None\nMultiprocessing start method (‘fork’, ‘spawn’, ‘forkserver’). ‘spawn’ is often safer with Numba/PyTorch. Set to None to use system default.\n'spawn'\n\n\n\nReturns:\nList of bins, where each bin contains indices of sequences assigned to it." 
}, { "objectID": "docs/api/utils.samplers.multipack.html#classes", "href": "docs/api/utils.samplers.multipack.html#classes", "title": "utils.samplers.multipack", "section": "", - "text": "Name\nDescription\n\n\n\n\nMultipackBatchSampler\nBatch sampler class for efficient packing of variable-length sequences\n\n\n\n\n\nutils.samplers.multipack.MultipackBatchSampler(\n self,\n sampler,\n batch_size,\n batch_max_len,\n lengths,\n packing_efficiency_estimate=1.0,\n drop_last=False,\n num_count_samples=16,\n sequential=False,\n group_size=100000,\n bin_size=200,\n num_processes=None,\n safe_mode=True,\n **kwargs,\n)\nBatch sampler class for efficient packing of variable-length sequences\nThis sampler packs sequences into fixed-capacity bins (batches) to maximize\nGPU memory utilization and training throughput by reducing padding.\nIt supports both parallel packing (using FFD algorithm) and\nsequential packing (preserving original sequence order).\n\n\n\n\n\nName\nDescription\n\n\n\n\nefficiency\nCalculate the packing efficiency (ratio of tokens used to total token slots).\n\n\ngather_efficiency\nGather and synchronize packing efficiency estimates across all distributed\n\n\ngather_len_batches\nGather and synchronize batch counts across all distributed ranks. 
Returns\n\n\ngenerate_batches\nGenerate packed batches for training.\n\n\nset_epoch\nSet the epoch number, used for reproducible shuffling across epochs\n\n\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.efficiency()\nCalculate the packing efficiency (ratio of tokens used to total token slots).\nHigher is better - 1.0 would mean perfect packing with no wasted space.\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.gather_efficiency()\nGather and synchronize packing efficiency estimates across all distributed\nranks.\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nfloat\nA conservative efficiency estimate based on the measurements.\n\n\n\n\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.gather_len_batches(num)\nGather and synchronize batch counts across all distributed ranks. Returns\nthe minimum number of batches available on any rank.\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.generate_batches(set_stats=False)\nGenerate packed batches for training.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nset_stats\nbool\nWhether to update efficiency statistics.\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[list[list[int]]]\nList of batches, where each batch contains multiple bins, and each bin contains multiple sequence indices.\n\n\n\n\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.set_epoch(epoch)\nSet the epoch number, used for reproducible shuffling across epochs" + "text": "Name\nDescription\n\n\n\n\nMultipackBatchSampler\nBatch sampler class for efficient packing of variable-length sequences\n\n\n\n\n\nutils.samplers.multipack.MultipackBatchSampler(\n sampler,\n batch_size,\n batch_max_len,\n lengths,\n packing_efficiency_estimate=1.0,\n drop_last=False,\n num_count_samples=16,\n sequential=False,\n group_size=100000,\n bin_size=200,\n num_processes=None,\n safe_mode=True,\n **kwargs,\n)\nBatch sampler class for efficient packing of variable-length sequences\nThis sampler packs 
sequences into fixed-capacity bins (batches) to maximize\nGPU memory utilization and training throughput by reducing padding.\nIt supports both parallel packing (using FFD algorithm) and\nsequential packing (preserving original sequence order).\n\n\n\n\n\nName\nDescription\n\n\n\n\nefficiency\nCalculate the packing efficiency (ratio of tokens used to total token slots).\n\n\ngather_efficiency\nGather and synchronize packing efficiency estimates across all distributed\n\n\ngather_len_batches\nGather and synchronize batch counts across all distributed ranks. Returns\n\n\ngenerate_batches\nGenerate packed batches for training.\n\n\nset_epoch\nSet the epoch number, used for reproducible shuffling across epochs\n\n\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.efficiency()\nCalculate the packing efficiency (ratio of tokens used to total token slots).\nHigher is better - 1.0 would mean perfect packing with no wasted space.\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.gather_efficiency()\nGather and synchronize packing efficiency estimates across all distributed\nranks.\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nfloat\nA conservative efficiency estimate based on the measurements.\n\n\n\n\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.gather_len_batches(num)\nGather and synchronize batch counts across all distributed ranks. 
Returns\nthe minimum number of batches available on any rank.\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.generate_batches(set_stats=False)\nGenerate packed batches for training.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nset_stats\nbool\nWhether to update efficiency statistics.\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nlist[list[list[int]]]\nList of batches, where each batch contains multiple bins, and each bin contains multiple sequence indices.\n\n\n\n\n\n\n\nutils.samplers.multipack.MultipackBatchSampler.set_epoch(epoch)\nSet the epoch number, used for reproducible shuffling across epochs" }, { "objectID": "docs/api/utils.samplers.multipack.html#functions", @@ -1192,14 +1192,14 @@ "href": "docs/api/utils.collators.mamba.html", "title": "utils.collators.mamba", "section": "", - "text": "utils.collators.mamba\ncollators for Mamba\n\n\n\n\n\nName\nDescription\n\n\n\n\nMambaDataCollator\nCollator for State Space Models (Mamba)\n\n\n\n\n\nutils.collators.mamba.MambaDataCollator(self, tokenizer)\nCollator for State Space Models (Mamba)" + "text": "utils.collators.mamba\ncollators for Mamba\n\n\n\n\n\nName\nDescription\n\n\n\n\nMambaDataCollator\nCollator for State Space Models (Mamba)\n\n\n\n\n\nutils.collators.mamba.MambaDataCollator(tokenizer)\nCollator for State Space Models (Mamba)" }, { "objectID": "docs/api/utils.collators.mamba.html#classes", "href": "docs/api/utils.collators.mamba.html#classes", "title": "utils.collators.mamba", "section": "", - "text": "Name\nDescription\n\n\n\n\nMambaDataCollator\nCollator for State Space Models (Mamba)\n\n\n\n\n\nutils.collators.mamba.MambaDataCollator(self, tokenizer)\nCollator for State Space Models (Mamba)" + "text": "Name\nDescription\n\n\n\n\nMambaDataCollator\nCollator for State Space Models (Mamba)\n\n\n\n\n\nutils.collators.mamba.MambaDataCollator(tokenizer)\nCollator for State Space Models (Mamba)" }, { "objectID": 
"docs/api/cli.merge_sharded_fsdp_weights.html", @@ -1223,32 +1223,53 @@ "text": "Name\nDescription\n\n\n\n\ndo_cli\nParses axolotl config, CLI args, and calls merge_fsdp_weights.\n\n\nmerge_fsdp_weights\nMerge the weights from sharded FSDP model checkpoints into a single combined checkpoint. Should be used if\n\n\n\n\n\ncli.merge_sharded_fsdp_weights.do_cli(config=Path('examples/'), **kwargs)\nParses axolotl config, CLI args, and calls merge_fsdp_weights.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nconfig\nUnion[Path, str]\nPath to axolotl config YAML file.\nPath('examples/')\n\n\nkwargs\n\nAdditional keyword arguments to override config file values.\n{}\n\n\n\n\n\n\n\ncli.merge_sharded_fsdp_weights.merge_fsdp_weights(\n checkpoint_dir,\n output_path,\n safe_serialization=False,\n remove_checkpoint_dir=False,\n)\nMerge the weights from sharded FSDP model checkpoints into a single combined checkpoint. Should be used if\nSHARDED_STATE_DICT was used for the model. Weights will be saved to {output_path}/model.safetensors if\nsafe_serialization else pytorch_model.bin.\nNote: this is a CPU-bound process.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncheckpoint_dir\nstr\nThe directory containing the FSDP checkpoints (can be either the model or optimizer).\nrequired\n\n\noutput_path\nstr\nThe path to save the merged checkpoint.\nrequired\n\n\nsafe_serialization\nbool, optional, defaults to True\nWhether to save the merged weights with safetensors (recommended).\nFalse\n\n\nremove_checkpoint_dir\nbool, optional, defaults to False\nWhether to remove the checkpoint directory after merging.\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nValueError\nIf torch version < 2.3.0, or if checkpoint_dir does not exist." 
}, { - "objectID": "docs/api/utils.callbacks.profiler.html", - "href": "docs/api/utils.callbacks.profiler.html", - "title": "utils.callbacks.profiler", + "objectID": "docs/api/core.builders.base.html", + "href": "docs/api/core.builders.base.html", + "title": "core.builders.base", "section": "", - "text": "utils.callbacks.profiler\nHF Trainer callback for creating pytorch profiling snapshots\n\n\n\n\n\nName\nDescription\n\n\n\n\nPytorchProfilerCallback\nPyTorch Profiler callback to create snapshots of GPU memory usage at specified steps.\n\n\n\n\n\nutils.callbacks.profiler.PytorchProfilerCallback(self, steps_to_profile=5)\nPyTorch Profiler callback to create snapshots of GPU memory usage at specified steps." + "text": "core.builders.base\nBase class for trainer builder\n\n\n\n\n\nName\nDescription\n\n\n\n\nTrainerBuilderBase\nBase class for trainer builder.\n\n\n\n\n\ncore.builders.base.TrainerBuilderBase(cfg, model, tokenizer, processor=None)\nBase class for trainer builder.\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_post_trainer_create_callbacks\nCallbacks added after the trainer is created, usually b/c these need access to the trainer\n\n\n\n\n\ncore.builders.base.TrainerBuilderBase.get_post_trainer_create_callbacks(trainer)\nCallbacks added after the trainer is created, usually b/c these need access to the trainer" }, { - "objectID": "docs/api/utils.callbacks.profiler.html#classes", - "href": "docs/api/utils.callbacks.profiler.html#classes", - "title": "utils.callbacks.profiler", + "objectID": "docs/api/core.builders.base.html#classes", + "href": "docs/api/core.builders.base.html#classes", + "title": "core.builders.base", "section": "", - "text": "Name\nDescription\n\n\n\n\nPytorchProfilerCallback\nPyTorch Profiler callback to create snapshots of GPU memory usage at specified steps.\n\n\n\n\n\nutils.callbacks.profiler.PytorchProfilerCallback(self, steps_to_profile=5)\nPyTorch Profiler callback to create snapshots of GPU memory usage at specified steps." 
+ "text": "Name\nDescription\n\n\n\n\nTrainerBuilderBase\nBase class for trainer builder.\n\n\n\n\n\ncore.builders.base.TrainerBuilderBase(cfg, model, tokenizer, processor=None)\nBase class for trainer builder.\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_post_trainer_create_callbacks\nCallbacks added after the trainer is created, usually b/c these need access to the trainer\n\n\n\n\n\ncore.builders.base.TrainerBuilderBase.get_post_trainer_create_callbacks(trainer)\nCallbacks added after the trainer is created, usually b/c these need access to the trainer" }, { - "objectID": "docs/api/loaders.tokenizer.html", - "href": "docs/api/loaders.tokenizer.html", - "title": "loaders.tokenizer", + "objectID": "docs/api/common.datasets.html", + "href": "docs/api/common.datasets.html", + "title": "common.datasets", "section": "", - "text": "loaders.tokenizer\nTokenizer loading functionality and associated utils\n\n\n\n\n\nName\nDescription\n\n\n\n\nload_tokenizer\nLoad and configure the tokenizer based on the provided config.\n\n\nmodify_tokenizer_files\nModify tokenizer files to replace added_tokens strings, save to output directory,\n\n\n\n\n\nloaders.tokenizer.load_tokenizer(cfg)\nLoad and configure the tokenizer based on the provided config.\n\n\n\nloaders.tokenizer.modify_tokenizer_files(\n tokenizer_path,\n token_mappings,\n output_dir,\n)\nModify tokenizer files to replace added_tokens strings, save to output directory,\nand return the path to the modified tokenizer.\nThis only works with reserved tokens that were added to the tokenizer, not tokens\nalready part of the vocab.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntokenizer_path\nstr\nPath or name of the original tokenizer\nrequired\n\n\ntoken_mappings\ndict[int, str]\nDict mapping {token_id (int): new_token_string}\nrequired\n\n\noutput_dir\nstr\nDirectory to save the modified tokenizer\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nstr\nPath to the modified tokenizer 
directory\n\n\n\nRef: https://github.com/huggingface/transformers/issues/27974#issuecomment-1854188941" + "text": "common.datasets\nDataset loading utilities.\n\n\n\n\n\nName\nDescription\n\n\n\n\nTrainDatasetMeta\nDataclass with fields for training and validation datasets and metadata.\n\n\n\n\n\ncommon.datasets.TrainDatasetMeta(\n train_dataset,\n eval_dataset=None,\n total_num_steps=None,\n)\nDataclass with fields for training and validation datasets and metadata.\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nload_datasets\nLoads one or more training or evaluation datasets, calling\n\n\nload_preference_datasets\nLoads one or more training or evaluation datasets for RL training using paired\n\n\nsample_dataset\nRandomly sample num_samples samples from dataset.\n\n\n\n\n\ncommon.datasets.load_datasets(cfg, cli_args=None, debug=False)\nLoads one or more training or evaluation datasets, calling\naxolotl.utils.data.prepare_dataset. Optionally, logs out debug information.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ncli_args\nPreprocessCliArgs | TrainerCliArgs | None\nCommand-specific CLI arguments.\nNone\n\n\ndebug\nbool\nWhether to print out tokenization of sample\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTrainDatasetMeta\nDataclass with fields for training and evaluation datasets and the computed\n\n\n\nTrainDatasetMeta\ntotal_num_steps.\n\n\n\n\n\n\n\ncommon.datasets.load_preference_datasets(cfg, cli_args)\nLoads one or more training or evaluation datasets for RL training using paired\npreference data, calling axolotl.utils.data.rl.load_prepare_preference_datasets.\nOptionally, logs out debug information.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ncli_args\nUnion[PreprocessCliArgs, TrainerCliArgs]\nCommand-specific CLI 
arguments.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTrainDatasetMeta\nDataclass with fields for training and evaluation datasets and the computed\n\n\n\nTrainDatasetMeta\ntotal_num_steps.\n\n\n\n\n\n\n\ncommon.datasets.sample_dataset(dataset, num_samples)\nRandomly sample num_samples samples from dataset.\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ndataset\nDataset\nDataset.\nrequired\n\n\nnum_samples\nint\nNumber of samples to return.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nDataset\nRandom sample (with replacement) of examples in dataset." }, { - "objectID": "docs/api/loaders.tokenizer.html#functions", - "href": "docs/api/loaders.tokenizer.html#functions", - "title": "loaders.tokenizer", + "objectID": "docs/api/common.datasets.html#classes", + "href": "docs/api/common.datasets.html#classes", + "title": "common.datasets", "section": "", - "text": "Name\nDescription\n\n\n\n\nload_tokenizer\nLoad and configure the tokenizer based on the provided config.\n\n\nmodify_tokenizer_files\nModify tokenizer files to replace added_tokens strings, save to output directory,\n\n\n\n\n\nloaders.tokenizer.load_tokenizer(cfg)\nLoad and configure the tokenizer based on the provided config.\n\n\n\nloaders.tokenizer.modify_tokenizer_files(\n tokenizer_path,\n token_mappings,\n output_dir,\n)\nModify tokenizer files to replace added_tokens strings, save to output directory,\nand return the path to the modified tokenizer.\nThis only works with reserved tokens that were added to the tokenizer, not tokens\nalready part of the vocab.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntokenizer_path\nstr\nPath or name of the original tokenizer\nrequired\n\n\ntoken_mappings\ndict[int, str]\nDict mapping {token_id (int): new_token_string}\nrequired\n\n\noutput_dir\nstr\nDirectory to save the modified tokenizer\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nstr\nPath to the modified 
tokenizer directory\n\n\n\nRef: https://github.com/huggingface/transformers/issues/27974#issuecomment-1854188941" + "text": "Name\nDescription\n\n\n\n\nTrainDatasetMeta\nDataclass with fields for training and validation datasets and metadata.\n\n\n\n\n\ncommon.datasets.TrainDatasetMeta(\n train_dataset,\n eval_dataset=None,\n total_num_steps=None,\n)\nDataclass with fields for training and validation datasets and metadata." + }, + { + "objectID": "docs/api/common.datasets.html#functions", + "href": "docs/api/common.datasets.html#functions", + "title": "common.datasets", + "section": "", + "text": "Name\nDescription\n\n\n\n\nload_datasets\nLoads one or more training or evaluation datasets, calling\n\n\nload_preference_datasets\nLoads one or more training or evaluation datasets for RL training using paired\n\n\nsample_dataset\nRandomly sample num_samples samples from dataset.\n\n\n\n\n\ncommon.datasets.load_datasets(cfg, cli_args=None, debug=False)\nLoads one or more training or evaluation datasets, calling\naxolotl.utils.data.prepare_dataset. 
Optionally, logs out debug information.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ncli_args\nPreprocessCliArgs | TrainerCliArgs | None\nCommand-specific CLI arguments.\nNone\n\n\ndebug\nbool\nWhether to print out tokenization of sample\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTrainDatasetMeta\nDataclass with fields for training and evaluation datasets and the computed\n\n\n\nTrainDatasetMeta\ntotal_num_steps.\n\n\n\n\n\n\n\ncommon.datasets.load_preference_datasets(cfg, cli_args)\nLoads one or more training or evaluation datasets for RL training using paired\npreference data, calling axolotl.utils.data.rl.load_prepare_preference_datasets.\nOptionally, logs out debug information.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ncli_args\nUnion[PreprocessCliArgs, TrainerCliArgs]\nCommand-specific CLI arguments.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTrainDatasetMeta\nDataclass with fields for training and evaluation datasets and the computed\n\n\n\nTrainDatasetMeta\ntotal_num_steps.\n\n\n\n\n\n\n\ncommon.datasets.sample_dataset(dataset, num_samples)\nRandomly sample num_samples samples from dataset.\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ndataset\nDataset\nDataset.\nrequired\n\n\nnum_samples\nint\nNumber of samples to return.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nDataset\nRandom sample (with replacement) of examples in dataset." 
+ }, + { + "objectID": "docs/api/prompt_strategies.stepwise_supervised.html", + "href": "docs/api/prompt_strategies.stepwise_supervised.html", + "title": "prompt_strategies.stepwise_supervised", + "section": "", + "text": "prompt_strategies.stepwise_supervised\nModule for stepwise datasets, typically including a prompt and reasoning traces,\nand (optionally) per-step, or per-prompt-trace labels for reward modelling.\n\n\n\n\n\nName\nDescription\n\n\n\n\nStepwiseSupervisedPromptTokenizingStrategy\nTokenizing strategy for supervised stepwise datasets, typically used for COT-reasoning.\n\n\n\n\n\nprompt_strategies.stepwise_supervised.StepwiseSupervisedPromptTokenizingStrategy(\n tokenizer,\n sequence_len=2048,\n step_separator='\\n',\n max_completion_length=None,\n train_on_last_step_only=False,\n)\nTokenizing strategy for supervised stepwise datasets, typically used for COT-reasoning.\nThese datasets should include the following columns:\n- prompt: the prompt text\n- completions: a list of n completion steps\n- labels: a list of n labels indicating the “correctness” of each step" + }, + { + "objectID": "docs/api/prompt_strategies.stepwise_supervised.html#classes", + "href": "docs/api/prompt_strategies.stepwise_supervised.html#classes", + "title": "prompt_strategies.stepwise_supervised", + "section": "", + "text": "Name\nDescription\n\n\n\n\nStepwiseSupervisedPromptTokenizingStrategy\nTokenizing strategy for supervised stepwise datasets, typically used for COT-reasoning.\n\n\n\n\n\nprompt_strategies.stepwise_supervised.StepwiseSupervisedPromptTokenizingStrategy(\n tokenizer,\n sequence_len=2048,\n step_separator='\\n',\n max_completion_length=None,\n train_on_last_step_only=False,\n)\nTokenizing strategy for supervised stepwise datasets, typically used for COT-reasoning.\nThese datasets should include the following columns:\n- prompt: the prompt text\n- completions: a list of n completion steps\n- labels: a list of n labels indicating the “correctness” of each step" 
}, { "objectID": "docs/api/core.datasets.transforms.chat_builder.html", @@ -1311,14 +1332,14 @@ "href": "docs/api/core.trainers.mamba.html", "title": "core.trainers.mamba", "section": "", - "text": "core.trainers.mamba\nModule for mamba trainer\n\n\n\n\n\nName\nDescription\n\n\n\n\nAxolotlMambaTrainer\nMamba specific trainer to handle loss calculation\n\n\n\n\n\ncore.trainers.mamba.AxolotlMambaTrainer(\n self,\n *_args,\n bench_data_collator=None,\n eval_data_collator=None,\n dataset_tags=None,\n **kwargs,\n)\nMamba specific trainer to handle loss calculation" + "text": "core.trainers.mamba\nModule for mamba trainer\n\n\n\n\n\nName\nDescription\n\n\n\n\nAxolotlMambaTrainer\nMamba specific trainer to handle loss calculation\n\n\n\n\n\ncore.trainers.mamba.AxolotlMambaTrainer(\n *_args,\n bench_data_collator=None,\n eval_data_collator=None,\n dataset_tags=None,\n **kwargs,\n)\nMamba specific trainer to handle loss calculation" }, { "objectID": "docs/api/core.trainers.mamba.html#classes", "href": "docs/api/core.trainers.mamba.html#classes", "title": "core.trainers.mamba", "section": "", - "text": "Name\nDescription\n\n\n\n\nAxolotlMambaTrainer\nMamba specific trainer to handle loss calculation\n\n\n\n\n\ncore.trainers.mamba.AxolotlMambaTrainer(\n self,\n *_args,\n bench_data_collator=None,\n eval_data_collator=None,\n dataset_tags=None,\n **kwargs,\n)\nMamba specific trainer to handle loss calculation" + "text": "Name\nDescription\n\n\n\n\nAxolotlMambaTrainer\nMamba specific trainer to handle loss calculation\n\n\n\n\n\ncore.trainers.mamba.AxolotlMambaTrainer(\n *_args,\n bench_data_collator=None,\n eval_data_collator=None,\n dataset_tags=None,\n **kwargs,\n)\nMamba specific trainer to handle loss calculation" }, { "objectID": "docs/api/integrations.grokfast.optimizer.html", @@ -1332,14 +1353,14 @@ "href": "docs/api/prompt_strategies.chat_template.html", "title": "prompt_strategies.chat_template", "section": "", - "text": "prompt_strategies.chat_template\nHF Chat 
Templates prompt strategy\n\n\n\n\n\nName\nDescription\n\n\n\n\nChatTemplatePrompter\nPrompter for HF chat templates\n\n\nChatTemplateStrategy\nTokenizing strategy for instruction-based prompts.\n\n\nStrategyLoader\nLoad chat template strategy based on configuration.\n\n\n\n\n\nprompt_strategies.chat_template.ChatTemplatePrompter(\n self,\n tokenizer,\n chat_template,\n processor=None,\n max_length=2048,\n message_property_mappings=None,\n message_field_training=None,\n message_field_training_detail=None,\n field_messages='messages',\n field_system='system',\n roles=None,\n drop_system_message=False,\n)\nPrompter for HF chat templates\n\n\n\nprompt_strategies.chat_template.ChatTemplateStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs,\n sequence_len,\n roles_to_train=None,\n train_on_eos=None,\n train_on_eot=None,\n eot_tokens=None,\n split_thinking=False,\n)\nTokenizing strategy for instruction-based prompts.\n\n\n\n\n\nName\nDescription\n\n\n\n\nfind_first_eot_token\nFind the first EOT token in the input_ids starting from start_idx.\n\n\nfind_turn\nLocate the starting and ending indices of the specified turn in a conversation.\n\n\ntokenize_prompt\nPublic method that can handle either a single prompt or a batch of prompts.\n\n\n\n\n\nprompt_strategies.chat_template.ChatTemplateStrategy.find_first_eot_token(\n input_ids,\n start_idx,\n)\nFind the first EOT token in the input_ids starting from start_idx.\n\n\n\nprompt_strategies.chat_template.ChatTemplateStrategy.find_turn(turns, turn_idx)\nLocate the starting and ending indices of the specified turn in a conversation.\n\n\n\nprompt_strategies.chat_template.ChatTemplateStrategy.tokenize_prompt(prompt)\nPublic method that can handle either a single prompt or a batch of prompts.\n\n\n\n\n\nprompt_strategies.chat_template.StrategyLoader()\nLoad chat template strategy based on configuration." 
+ "text": "prompt_strategies.chat_template\nHF Chat Templates prompt strategy\n\n\n\n\n\nName\nDescription\n\n\n\n\nChatTemplatePrompter\nPrompter for HF chat templates\n\n\nChatTemplateStrategy\nTokenizing strategy for instruction-based prompts.\n\n\nStrategyLoader\nLoad chat template strategy based on configuration.\n\n\n\n\n\nprompt_strategies.chat_template.ChatTemplatePrompter(\n tokenizer,\n chat_template,\n processor=None,\n max_length=2048,\n message_property_mappings=None,\n message_field_training=None,\n message_field_training_detail=None,\n field_messages='messages',\n field_system='system',\n roles=None,\n drop_system_message=False,\n)\nPrompter for HF chat templates\n\n\n\nprompt_strategies.chat_template.ChatTemplateStrategy(\n prompter,\n tokenizer,\n train_on_inputs,\n sequence_len,\n roles_to_train=None,\n train_on_eos=None,\n train_on_eot=None,\n eot_tokens=None,\n split_thinking=False,\n)\nTokenizing strategy for instruction-based prompts.\n\n\n\n\n\nName\nDescription\n\n\n\n\nfind_first_eot_token\nFind the first EOT token in the input_ids starting from start_idx.\n\n\nfind_turn\nLocate the starting and ending indices of the specified turn in a conversation.\n\n\ntokenize_prompt\nPublic method that can handle either a single prompt or a batch of prompts.\n\n\n\n\n\nprompt_strategies.chat_template.ChatTemplateStrategy.find_first_eot_token(\n input_ids,\n start_idx,\n)\nFind the first EOT token in the input_ids starting from start_idx.\n\n\n\nprompt_strategies.chat_template.ChatTemplateStrategy.find_turn(turns, turn_idx)\nLocate the starting and ending indices of the specified turn in a conversation.\n\n\n\nprompt_strategies.chat_template.ChatTemplateStrategy.tokenize_prompt(prompt)\nPublic method that can handle either a single prompt or a batch of prompts.\n\n\n\n\n\nprompt_strategies.chat_template.StrategyLoader()\nLoad chat template strategy based on configuration." 
}, { "objectID": "docs/api/prompt_strategies.chat_template.html#classes", "href": "docs/api/prompt_strategies.chat_template.html#classes", "title": "prompt_strategies.chat_template", "section": "", - "text": "Name\nDescription\n\n\n\n\nChatTemplatePrompter\nPrompter for HF chat templates\n\n\nChatTemplateStrategy\nTokenizing strategy for instruction-based prompts.\n\n\nStrategyLoader\nLoad chat template strategy based on configuration.\n\n\n\n\n\nprompt_strategies.chat_template.ChatTemplatePrompter(\n self,\n tokenizer,\n chat_template,\n processor=None,\n max_length=2048,\n message_property_mappings=None,\n message_field_training=None,\n message_field_training_detail=None,\n field_messages='messages',\n field_system='system',\n roles=None,\n drop_system_message=False,\n)\nPrompter for HF chat templates\n\n\n\nprompt_strategies.chat_template.ChatTemplateStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs,\n sequence_len,\n roles_to_train=None,\n train_on_eos=None,\n train_on_eot=None,\n eot_tokens=None,\n split_thinking=False,\n)\nTokenizing strategy for instruction-based prompts.\n\n\n\n\n\nName\nDescription\n\n\n\n\nfind_first_eot_token\nFind the first EOT token in the input_ids starting from start_idx.\n\n\nfind_turn\nLocate the starting and ending indices of the specified turn in a conversation.\n\n\ntokenize_prompt\nPublic method that can handle either a single prompt or a batch of prompts.\n\n\n\n\n\nprompt_strategies.chat_template.ChatTemplateStrategy.find_first_eot_token(\n input_ids,\n start_idx,\n)\nFind the first EOT token in the input_ids starting from start_idx.\n\n\n\nprompt_strategies.chat_template.ChatTemplateStrategy.find_turn(turns, turn_idx)\nLocate the starting and ending indices of the specified turn in a conversation.\n\n\n\nprompt_strategies.chat_template.ChatTemplateStrategy.tokenize_prompt(prompt)\nPublic method that can handle either a single prompt or a batch of 
prompts.\n\n\n\n\n\nprompt_strategies.chat_template.StrategyLoader()\nLoad chat template strategy based on configuration." + "text": "Name\nDescription\n\n\n\n\nChatTemplatePrompter\nPrompter for HF chat templates\n\n\nChatTemplateStrategy\nTokenizing strategy for instruction-based prompts.\n\n\nStrategyLoader\nLoad chat template strategy based on configuration.\n\n\n\n\n\nprompt_strategies.chat_template.ChatTemplatePrompter(\n tokenizer,\n chat_template,\n processor=None,\n max_length=2048,\n message_property_mappings=None,\n message_field_training=None,\n message_field_training_detail=None,\n field_messages='messages',\n field_system='system',\n roles=None,\n drop_system_message=False,\n)\nPrompter for HF chat templates\n\n\n\nprompt_strategies.chat_template.ChatTemplateStrategy(\n prompter,\n tokenizer,\n train_on_inputs,\n sequence_len,\n roles_to_train=None,\n train_on_eos=None,\n train_on_eot=None,\n eot_tokens=None,\n split_thinking=False,\n)\nTokenizing strategy for instruction-based prompts.\n\n\n\n\n\nName\nDescription\n\n\n\n\nfind_first_eot_token\nFind the first EOT token in the input_ids starting from start_idx.\n\n\nfind_turn\nLocate the starting and ending indices of the specified turn in a conversation.\n\n\ntokenize_prompt\nPublic method that can handle either a single prompt or a batch of prompts.\n\n\n\n\n\nprompt_strategies.chat_template.ChatTemplateStrategy.find_first_eot_token(\n input_ids,\n start_idx,\n)\nFind the first EOT token in the input_ids starting from start_idx.\n\n\n\nprompt_strategies.chat_template.ChatTemplateStrategy.find_turn(turns, turn_idx)\nLocate the starting and ending indices of the specified turn in a conversation.\n\n\n\nprompt_strategies.chat_template.ChatTemplateStrategy.tokenize_prompt(prompt)\nPublic method that can handle either a single prompt or a batch of prompts.\n\n\n\n\n\nprompt_strategies.chat_template.StrategyLoader()\nLoad chat template strategy based on configuration." 
}, { "objectID": "docs/api/utils.schemas.multimodal.html", @@ -1430,14 +1451,14 @@ "href": "docs/api/utils.freeze.html", "title": "utils.freeze", "section": "", - "text": "utils.freeze\nmodule to freeze/unfreeze parameters by name\n\n\n\n\n\nName\nDescription\n\n\n\n\nLayerNamePattern\nRepresents a regex pattern for layer names, potentially including a parameter index range.\n\n\n\n\n\nutils.freeze.LayerNamePattern(self, pattern)\nRepresents a regex pattern for layer names, potentially including a parameter index range.\n\n\n\n\n\nName\nDescription\n\n\n\n\nmatch\nChecks if the given layer name matches the regex pattern.\n\n\n\n\n\nutils.freeze.LayerNamePattern.match(name)\nChecks if the given layer name matches the regex pattern.\nParameters:\n- name (str): The layer name to check.\nReturns:\n- bool: True if the layer name matches the pattern, False otherwise.\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nfreeze_layers_except\nFreezes all layers of the given model except for the layers that match given regex patterns.\n\n\n\n\n\nutils.freeze.freeze_layers_except(model, regex_patterns)\nFreezes all layers of the given model except for the layers that match given regex patterns.\nPeriods in the patterns are treated as literal periods, not as wildcard characters.\nParameters:\n- model (nn.Module): The PyTorch model to be modified.\n- regex_patterns (list of str): List of regex patterns to match layer names to keep unfrozen.\nNote that you cannot use a dot as a wildcard character in the patterns since it is reserved for separating layer names.\nAlso, to match the entire layer name, the pattern should start with “^” and end with “\\(\", otherwise it will match any part of the layer name.\n The range pattern part is optional and it is not compiled as a regex pattern which means you must put \"\\)” before the range pattern if you want to match the entire layer name.\nE.g., [“^model.embed_tokens.weight\\([:32000]\", 
\"layers.2[0-9]+.block_sparse_moe.gate.[a-z]+\\)”]\nReturns:\nNone; the model is modified in place." + "text": "utils.freeze\nmodule to freeze/unfreeze parameters by name\n\n\n\n\n\nName\nDescription\n\n\n\n\nLayerNamePattern\nRepresents a regex pattern for layer names, potentially including a parameter index range.\n\n\n\n\n\nutils.freeze.LayerNamePattern(pattern)\nRepresents a regex pattern for layer names, potentially including a parameter index range.\n\n\n\n\n\nName\nDescription\n\n\n\n\nmatch\nChecks if the given layer name matches the regex pattern.\n\n\n\n\n\nutils.freeze.LayerNamePattern.match(name)\nChecks if the given layer name matches the regex pattern.\nParameters:\n- name (str): The layer name to check.\nReturns:\n- bool: True if the layer name matches the pattern, False otherwise.\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nfreeze_layers_except\nFreezes all layers of the given model except for the layers that match given regex patterns.\n\n\n\n\n\nutils.freeze.freeze_layers_except(model, regex_patterns)\nFreezes all layers of the given model except for the layers that match given regex patterns.\nPeriods in the patterns are treated as literal periods, not as wildcard characters.\nParameters:\n- model (nn.Module): The PyTorch model to be modified.\n- regex_patterns (list of str): List of regex patterns to match layer names to keep unfrozen.\nNote that you cannot use a dot as a wildcard character in the patterns since it is reserved for separating layer names.\nAlso, to match the entire layer name, the pattern should start with “^” and end with “\\(\", otherwise it will match any part of the layer name.\n The range pattern part is optional and it is not compiled as a regex pattern which means you must put \"\\)” before the range pattern if you want to match the entire layer name.\nE.g., [“^model.embed_tokens.weight\\([:32000]\", \"layers.2[0-9]+.block_sparse_moe.gate.[a-z]+\\)”]\nReturns:\nNone; the model is modified in place." 
}, { "objectID": "docs/api/utils.freeze.html#classes", "href": "docs/api/utils.freeze.html#classes", "title": "utils.freeze", "section": "", - "text": "Name\nDescription\n\n\n\n\nLayerNamePattern\nRepresents a regex pattern for layer names, potentially including a parameter index range.\n\n\n\n\n\nutils.freeze.LayerNamePattern(self, pattern)\nRepresents a regex pattern for layer names, potentially including a parameter index range.\n\n\n\n\n\nName\nDescription\n\n\n\n\nmatch\nChecks if the given layer name matches the regex pattern.\n\n\n\n\n\nutils.freeze.LayerNamePattern.match(name)\nChecks if the given layer name matches the regex pattern.\nParameters:\n- name (str): The layer name to check.\nReturns:\n- bool: True if the layer name matches the pattern, False otherwise." + "text": "Name\nDescription\n\n\n\n\nLayerNamePattern\nRepresents a regex pattern for layer names, potentially including a parameter index range.\n\n\n\n\n\nutils.freeze.LayerNamePattern(pattern)\nRepresents a regex pattern for layer names, potentially including a parameter index range.\n\n\n\n\n\nName\nDescription\n\n\n\n\nmatch\nChecks if the given layer name matches the regex pattern.\n\n\n\n\n\nutils.freeze.LayerNamePattern.match(name)\nChecks if the given layer name matches the regex pattern.\nParameters:\n- name (str): The layer name to check.\nReturns:\n- bool: True if the layer name matches the pattern, False otherwise." }, { "objectID": "docs/api/utils.freeze.html#functions", @@ -1465,14 +1486,14 @@ "href": "docs/api/index.html", "title": "API Reference", "section": "", - "text": "Core functionality for training\n\n\n\ntrain\nPrepare and train a model on a dataset. 
Can also infer from a model or merge lora\n\n\nevaluate\nModule for evaluating models.\n\n\ndatasets\nModule containing Dataset functionality\n\n\nconvert\nModule containing File Reader, File Writer, Json Parser, and Jsonl Serializer classes\n\n\nprompt_tokenizers\nModule containing PromptTokenizingStrategy and Prompter classes\n\n\nlogging_config\nCommon logging module for axolotl\n\n\ncore.trainer_builder\nBuilder for the training args and trainer\n\n\ncore.training_args\nextra axolotl specific training args\n\n\ncore.chat.messages\ninternal message representations of chat messages\n\n\ncore.chat.format.chatml\nChatML transformation functions for MessageContents\n\n\ncore.chat.format.llama3x\nLlama 3.x chat formatting functions for MessageContents\n\n\ncore.chat.format.shared\nshared functions for format transforms\n\n\ncore.datasets.chat\nchat dataset module\n\n\ncore.datasets.transforms.chat_builder\nThis module contains a function that builds a transform that takes a row from the dataset and converts it to a Chat.\n\n\n\n\n\n\nCommand-line interface\n\n\n\ncli.main\nClick CLI definitions for various axolotl commands.\n\n\ncli.train\nCLI to run training on a model.\n\n\ncli.evaluate\nCLI to run evaluation on a model.\n\n\ncli.args\nModule for axolotl CLI command arguments.\n\n\ncli.checks\nVarious checks for Axolotl CLI.\n\n\ncli.config\nConfiguration loading and processing.\n\n\ncli.inference\nCLI to run inference on a trained model.\n\n\ncli.merge_lora\nCLI to merge a trained LoRA into a base model.\n\n\ncli.merge_sharded_fsdp_weights\nCLI to merge sharded FSDP model checkpoints into a single combined checkpoint.\n\n\ncli.preprocess\nCLI to run preprocessing of a dataset.\n\n\ncli.sweeps\nUtilities for handling sweeps over configs for axolotl train CLI command\n\n\ncli.utils\nUtility methods for axolotl CLI.\n\n\ncli.vllm_serve\nCLI to start the vllm server for online RL\n\n\ncli.cloud.base\nbase class for cloud platforms from cli\n\n\ncli.cloud.modal_\nModal 
Cloud support from CLI\n\n\ncli.quantize\nCLI to post-training quantize a model using torchao\n\n\n\n\n\n\nTraining implementations\n\n\n\ncore.trainers.base\nModule for customized trainers\n\n\ncore.trainers.trl\nModule for TRL PPO trainer\n\n\ncore.trainers.mamba\nModule for mamba trainer\n\n\ncore.trainers.relora\nModule for ReLoRA trainer\n\n\ncore.trainers.dpo.trainer\nDPO trainer for axolotl\n\n\ncore.trainers.grpo.trainer\nAxolotl GRPO trainers (with and without sequence parallelism handling)\n\n\ncore.trainers.grpo.sampler\nRepeat random sampler (similar to the one implemented in\n\n\ncore.trainers.utils\nUtils for Axolotl trainers\n\n\n\n\n\n\nFunctionality for loading and patching models, tokenizers, etc.\n\n\n\nloaders.model\nModel loader class implementation for loading, configuring, and patching various\n\n\nloaders.tokenizer\nTokenizer loading functionality and associated utils\n\n\nloaders.processor\nProcessor loading functionality for multi-modal models\n\n\nloaders.adapter\nAdapter loading functionality, including LoRA / QLoRA and associated utils\n\n\nloaders.patch_manager\nPatch manager class implementation to complement axolotl.loaders.ModelLoader.\n\n\nloaders.constants\nShared constants for axolotl.loaders module\n\n\n\n\n\n\nMixin classes for augmenting trainers\n\n\n\ncore.trainers.mixins.optimizer\nModule for Axolotl trainer optimizer mixin\n\n\ncore.trainers.mixins.rng_state_loader\nTemporary fix/override for bug in resume from checkpoint\n\n\ncore.trainers.mixins.scheduler\nModule for Axolotl trainer scheduler mixin\n\n\n\n\n\n\nContext managers for altering trainer behaviors\n\n\n\nutils.ctx_managers.sequence_parallel\nModule for Axolotl trainer sequence parallelism manager and utilities\n\n\n\n\n\n\nPrompt formatting strategies\n\n\n\nprompt_strategies.base\nmodule for base dataset transform strategies\n\n\nprompt_strategies.chat_template\nHF Chat Templates prompt strategy\n\n\nprompt_strategies.alpaca_chat\nModule for Alpaca prompt 
strategy classes\n\n\nprompt_strategies.alpaca_instruct\nModule loading the AlpacaInstructPromptTokenizingStrategy class\n\n\nprompt_strategies.alpaca_w_system\nPrompt strategies loader for alpaca instruction datasets with system prompts\n\n\nprompt_strategies.user_defined\nUser Defined prompts with configuration from the YML config\n\n\nprompt_strategies.llama2_chat\nPrompt Strategy for finetuning Llama2 chat models\n\n\nprompt_strategies.completion\nBasic completion text\n\n\nprompt_strategies.input_output\nModule for plain input/output prompt pairs\n\n\nprompt_strategies.stepwise_supervised\nModule for stepwise datasets, typically including a prompt and reasoning traces,\n\n\nprompt_strategies.metharme\nModule containing the MetharmenPromptTokenizingStrategy and MetharmePrompter class\n\n\nprompt_strategies.orcamini\nPrompt Strategy for finetuning Orca Mini (v2) models\n\n\nprompt_strategies.pygmalion\nModule containing the PygmalionPromptTokenizingStrategy and PygmalionPrompter class\n\n\nprompt_strategies.messages.chat\nChat dataset wrapping strategy for new internal messages representations\n\n\nprompt_strategies.dpo.chat_template\nDPO prompt strategies for using tokenizer chat templates.\n\n\nprompt_strategies.dpo.llama3\nDPO strategies for llama-3 chat template\n\n\nprompt_strategies.dpo.chatml\nDPO strategies for chatml\n\n\nprompt_strategies.dpo.zephyr\nDPO strategies for zephyr\n\n\nprompt_strategies.dpo.user_defined\nUser-defined DPO strategies\n\n\nprompt_strategies.dpo.passthrough\nDPO prompt strategies passthrough/zero-processing strategy\n\n\nprompt_strategies.kto.llama3\nKTO strategies for llama-3 chat template\n\n\nprompt_strategies.kto.chatml\nKTO strategies for chatml\n\n\nprompt_strategies.kto.user_defined\nUser-defined KTO strategies\n\n\nprompt_strategies.orpo.chat_template\nchatml prompt tokenization strategy for ORPO\n\n\nprompt_strategies.bradley_terry.llama3\nchatml transforms for datasets with system, input, chosen, rejected to match 
llama3 chat template\n\n\n\n\n\n\nLow-level performance optimizations\n\n\n\nkernels.lora\nModule for definition of Low-Rank Adaptation (LoRA) Triton kernels.\n\n\nkernels.geglu\nModule for definition of GEGLU Triton kernels.\n\n\nkernels.swiglu\nModule for definition of SwiGLU Triton kernels.\n\n\nkernels.quantize\nDequantization utilities for bitsandbytes integration.\n\n\nkernels.utils\nUtilities for axolotl.kernels submodules.\n\n\n\n\n\n\nRuntime patches for model optimizations\n\n\n\nmonkeypatch.llama_attn_hijack_flash\nFlash attention monkey patch for llama model\n\n\nmonkeypatch.llama_attn_hijack_xformers\nDirectly copied the code from https://raw.githubusercontent.com/oobabooga/text-generation-webui/main/modules/llama_attn_hijack.py and made some adjustments\n\n\nmonkeypatch.mistral_attn_hijack_flash\nFlash attention monkey patch for mistral model\n\n\nmonkeypatch.multipack\nmultipack patching for v2 of sample packing\n\n\nmonkeypatch.relora\nImplements the ReLoRA training procedure from https://arxiv.org/abs/2307.05695, minus the initial full fine-tune.\n\n\nmonkeypatch.llama_expand_mask\nexpands the binary attention mask per 3.2.2 of https://arxiv.org/pdf/2107.02027.pdf\n\n\nmonkeypatch.lora_kernels\nModule for patching custom LoRA Triton kernels and torch.autograd functions.\n\n\nmonkeypatch.utils\nShared utils for the monkeypatches\n\n\nmonkeypatch.btlm_attn_hijack_flash\nFlash attention monkey patch for cerebras btlm model\n\n\nmonkeypatch.llama_patch_multipack\nPatched LlamaAttention to use torch.nn.functional.scaled_dot_product_attention\n\n\nmonkeypatch.stablelm_attn_hijack_flash\nPyTorch StableLM Epoch model.\n\n\nmonkeypatch.trainer_fsdp_optim\nfix for FSDP optimizer save in trainer w 4.47.0\n\n\nmonkeypatch.transformers_fa_utils\nsee https://github.com/huggingface/transformers/pull/35834\n\n\nmonkeypatch.unsloth_\nmodule for patching with unsloth optimizations\n\n\nmonkeypatch.attention.mllama\nMonkeypatch for Vision Llama for FA2 
support\n\n\nmonkeypatch.data.batch_dataset_fetcher\nmonkey patches for the dataset fetcher to handle batches of packed indexes\n\n\nmonkeypatch.mixtral\nPatches to support multipack for mixtral\n\n\nmonkeypatch.gradient_checkpointing.offload_cpu\nCPU offloaded checkpointing\n\n\nmonkeypatch.gradient_checkpointing.offload_disk\nDISCO - DIsk-based Storage and Checkpointing with Optimized prefetching\n\n\n\n\n\n\nUtility functions\n\n\n\nutils.tokenization\nModule for tokenization utilities\n\n\nutils.chat_templates\nThis module provides functionality for selecting chat templates based on user choices.\n\n\nutils.lora\nmodule to get the state dict of a merged lora model\n\n\nutils.model_shard_quant\nmodule to handle loading model on cpu/meta device for FSDP\n\n\nutils.bench\nBenchmarking and measurement utilities\n\n\nutils.freeze\nmodule to freeze/unfreeze parameters by name\n\n\nutils.trainer\nModule containing the Trainer class and related functions\n\n\nutils.schedulers\nModule for custom LRScheduler class\n\n\nutils.distributed\nutility helpers for distributed checks\n\n\nutils.dict\nModule containing the DictDefault class\n\n\nutils.optimizers.adopt\nCopied from https://github.com/iShohei220/adopt\n\n\nutils.data.pretraining\ndata handling specific to pretraining\n\n\nutils.data.sft\ndata handling specific to SFT\n\n\nutils.quantization\nUtilities for quantization including QAT and PTQ using torchao.\n\n\n\n\n\n\nPydantic data models for Axolotl config\n\n\n\nutils.schemas.config\nModule with Pydantic models for configuration.\n\n\nutils.schemas.model\nPydantic models for model input / output, etc. 
configuration\n\n\nutils.schemas.training\nPydantic models for training hyperparameters\n\n\nutils.schemas.datasets\nPydantic models for datasets-related configuration\n\n\nutils.schemas.peft\nPydantic models for PEFT-related configuration\n\n\nutils.schemas.trl\nPydantic models for TRL trainer configuration\n\n\nutils.schemas.multimodal\nPydantic models for multimodal-related configuration\n\n\nutils.schemas.integrations\nPydantic models for Axolotl integrations\n\n\nutils.schemas.enums\nEnums for Axolotl input config\n\n\nutils.schemas.utils\nUtilities for Axolotl Pydantic models\n\n\n\n\n\n\nThird-party integrations and extensions\n\n\n\nintegrations.base\nBase class for all plugins.\n\n\nintegrations.cut_cross_entropy.args\nModule for handling Cut Cross Entropy input arguments.\n\n\nintegrations.grokfast.optimizer\n\n\n\nintegrations.kd.trainer\nKD trainer\n\n\nintegrations.liger.args\nModule for handling LIGER input arguments.\n\n\nintegrations.lm_eval.args\nModule for handling lm eval harness input arguments.\n\n\nintegrations.spectrum.args\nModule for handling Spectrum input arguments.\n\n\n\n\n\n\nCommon utilities and shared functionality\n\n\n\ncommon.architectures\nCommon architecture specific constants\n\n\ncommon.const\nVarious shared constants\n\n\ncommon.datasets\nDataset loading utilities.\n\n\n\n\n\n\nCustom model implementations\n\n\n\nmodels.mamba.modeling_mamba\n\n\n\n\n\n\n\nData processing utilities\n\n\n\nutils.collators.core\nbasic shared collator constants\n\n\nutils.collators.batching\nData collators for axolotl to pad labels and position_ids for packed sequences\n\n\nutils.collators.mamba\ncollators for Mamba\n\n\nutils.collators.mm_chat\nCollators for multi-modal chat messages and packing\n\n\nutils.samplers.multipack\nMultipack Batch Sampler - An efficient batch sampler for packing variable-length sequences\n\n\n\n\n\n\nTraining callbacks\n\n\n\nutils.callbacks.perplexity\ncallback to calculate perplexity as an evaluation 
metric.\n\n\nutils.callbacks.profiler\nHF Trainer callback for creating pytorch profiling snapshots\n\n\nutils.callbacks.lisa\nmodule for LISA\n\n\nutils.callbacks.mlflow_\nMLFlow module for trainer callbacks\n\n\nutils.callbacks.comet_\nComet module for trainer callbacks\n\n\nutils.callbacks.qat\nQAT Callback for HF Causal Trainer" + "text": "Core functionality for training\n\n\n\ntrain\nPrepare and train a model on a dataset. Can also infer from a model or merge lora\n\n\nevaluate\nModule for evaluating models.\n\n\ndatasets\nModule containing Dataset functionality\n\n\nconvert\nModule containing File Reader, File Writer, Json Parser, and Jsonl Serializer classes\n\n\nprompt_tokenizers\nModule containing PromptTokenizingStrategy and Prompter classes\n\n\nlogging_config\nCommon logging module for axolotl\n\n\ncore.builders.base\nBase class for trainer builder\n\n\ncore.builders.causal\nBuilder for causal trainers\n\n\ncore.builders.rl\nBuilder for RLHF trainers\n\n\ncore.training_args\nextra axolotl specific training args\n\n\ncore.chat.messages\ninternal message representations of chat messages\n\n\ncore.chat.format.chatml\nChatML transformation functions for MessageContents\n\n\ncore.chat.format.llama3x\nLlama 3.x chat formatting functions for MessageContents\n\n\ncore.chat.format.shared\nshared functions for format transforms\n\n\ncore.datasets.chat\nchat dataset module\n\n\ncore.datasets.transforms.chat_builder\nThis module contains a function that builds a transform that takes a row from the dataset and converts it to a Chat.\n\n\n\n\n\n\nCommand-line interface\n\n\n\ncli.main\nClick CLI definitions for various axolotl commands.\n\n\ncli.train\nCLI to run training on a model.\n\n\ncli.evaluate\nCLI to run evaluation on a model.\n\n\ncli.args\nModule for axolotl CLI command arguments.\n\n\ncli.checks\nVarious checks for Axolotl CLI.\n\n\ncli.config\nConfiguration loading and processing.\n\n\ncli.inference\nCLI to run inference on a trained 
model.\n\n\ncli.merge_lora\nCLI to merge a trained LoRA into a base model.\n\n\ncli.merge_sharded_fsdp_weights\nCLI to merge sharded FSDP model checkpoints into a single combined checkpoint.\n\n\ncli.preprocess\nCLI to run preprocessing of a dataset.\n\n\ncli.sweeps\nUtilities for handling sweeps over configs for axolotl train CLI command\n\n\ncli.utils\nUtility methods for axolotl CLI.\n\n\ncli.vllm_serve\nCLI to start the vllm server for online RL\n\n\ncli.cloud.base\nbase class for cloud platforms from cli\n\n\ncli.cloud.modal_\nModal Cloud support from CLI\n\n\ncli.quantize\nCLI to post-training quantize a model using torchao\n\n\n\n\n\n\nTraining implementations\n\n\n\ncore.trainers.base\nModule for customized trainers\n\n\ncore.trainers.trl\nModule for TRL PPO trainer\n\n\ncore.trainers.mamba\nModule for mamba trainer\n\n\ncore.trainers.relora\nModule for ReLoRA trainer\n\n\ncore.trainers.dpo.trainer\nDPO trainer for axolotl\n\n\ncore.trainers.grpo.trainer\nAxolotl GRPO trainers (with and without sequence parallelism handling)\n\n\ncore.trainers.grpo.sampler\nRepeat random sampler (similar to the one implemented in\n\n\ncore.trainers.utils\nUtils for Axolotl trainers\n\n\n\n\n\n\nFunctionality for loading and patching models, tokenizers, etc.\n\n\n\nloaders.model\nModel loader class implementation for loading, configuring, and patching various\n\n\nloaders.tokenizer\nTokenizer loading functionality and associated utils\n\n\nloaders.processor\nProcessor loading functionality for multi-modal models\n\n\nloaders.adapter\nAdapter loading functionality, including LoRA / QLoRA and associated utils\n\n\nloaders.patch_manager\nPatch manager class implementation to complement axolotl.loaders.ModelLoader.\n\n\nloaders.constants\nShared constants for axolotl.loaders module\n\n\n\n\n\n\nMixin classes for augmenting trainers\n\n\n\ncore.trainers.mixins.optimizer\nModule for Axolotl trainer optimizer mixin\n\n\ncore.trainers.mixins.rng_state_loader\nTemporary fix/override 
for bug in resume from checkpoint\n\n\ncore.trainers.mixins.scheduler\nModule for Axolotl trainer scheduler mixin\n\n\n\n\n\n\nContext managers for altering trainer behaviors\n\n\n\nutils.ctx_managers.sequence_parallel\nModule for Axolotl trainer sequence parallelism manager and utilities\n\n\n\n\n\n\nPrompt formatting strategies\n\n\n\nprompt_strategies.base\nmodule for base dataset transform strategies\n\n\nprompt_strategies.chat_template\nHF Chat Templates prompt strategy\n\n\nprompt_strategies.alpaca_chat\nModule for Alpaca prompt strategy classes\n\n\nprompt_strategies.alpaca_instruct\nModule loading the AlpacaInstructPromptTokenizingStrategy class\n\n\nprompt_strategies.alpaca_w_system\nPrompt strategies loader for alpaca instruction datasets with system prompts\n\n\nprompt_strategies.user_defined\nUser Defined prompts with configuration from the YML config\n\n\nprompt_strategies.llama2_chat\nPrompt Strategy for finetuning Llama2 chat models\n\n\nprompt_strategies.completion\nBasic completion text\n\n\nprompt_strategies.input_output\nModule for plain input/output prompt pairs\n\n\nprompt_strategies.stepwise_supervised\nModule for stepwise datasets, typically including a prompt and reasoning traces,\n\n\nprompt_strategies.metharme\nModule containing the MetharmenPromptTokenizingStrategy and MetharmePrompter class\n\n\nprompt_strategies.orcamini\nPrompt Strategy for finetuning Orca Mini (v2) models\n\n\nprompt_strategies.pygmalion\nModule containing the PygmalionPromptTokenizingStrategy and PygmalionPrompter class\n\n\nprompt_strategies.messages.chat\nChat dataset wrapping strategy for new internal messages representations\n\n\nprompt_strategies.dpo.chat_template\nDPO prompt strategies for using tokenizer chat templates.\n\n\nprompt_strategies.dpo.llama3\nDPO strategies for llama-3 chat template\n\n\nprompt_strategies.dpo.chatml\nDPO strategies for chatml\n\n\nprompt_strategies.dpo.zephyr\nDPO strategies for 
zephyr\n\n\nprompt_strategies.dpo.user_defined\nUser-defined DPO strategies\n\n\nprompt_strategies.dpo.passthrough\nDPO prompt strategies passthrough/zero-processing strategy\n\n\nprompt_strategies.kto.llama3\nKTO strategies for llama-3 chat template\n\n\nprompt_strategies.kto.chatml\nKTO strategies for chatml\n\n\nprompt_strategies.kto.user_defined\nUser-defined KTO strategies\n\n\nprompt_strategies.orpo.chat_template\nchatml prompt tokenization strategy for ORPO\n\n\nprompt_strategies.bradley_terry.llama3\nchatml transforms for datasets with system, input, chosen, rejected to match llama3 chat template\n\n\n\n\n\n\nLow-level performance optimizations\n\n\n\nkernels.lora\nModule for definition of Low-Rank Adaptation (LoRA) Triton kernels.\n\n\nkernels.geglu\nModule for definition of GEGLU Triton kernels.\n\n\nkernels.swiglu\nModule for definition of SwiGLU Triton kernels.\n\n\nkernels.quantize\nDequantization utilities for bitsandbytes integration.\n\n\nkernels.utils\nUtilities for axolotl.kernels submodules.\n\n\n\n\n\n\nRuntime patches for model optimizations\n\n\n\nmonkeypatch.llama_attn_hijack_flash\nFlash attention monkey patch for llama model\n\n\nmonkeypatch.llama_attn_hijack_xformers\nDirectly copied the code from https://raw.githubusercontent.com/oobabooga/text-generation-webui/main/modules/llama_attn_hijack.py and made some adjustments\n\n\nmonkeypatch.mistral_attn_hijack_flash\nFlash attention monkey patch for mistral model\n\n\nmonkeypatch.multipack\nmultipack patching for v2 of sample packing\n\n\nmonkeypatch.relora\nImplements the ReLoRA training procedure from https://arxiv.org/abs/2307.05695, minus the initial full fine-tune.\n\n\nmonkeypatch.llama_expand_mask\nexpands the binary attention mask per 3.2.2 of https://arxiv.org/pdf/2107.02027.pdf\n\n\nmonkeypatch.lora_kernels\nModule for patching custom LoRA Triton kernels and torch.autograd functions.\n\n\nmonkeypatch.utils\nShared utils for the 
monkeypatches\n\n\nmonkeypatch.btlm_attn_hijack_flash\nFlash attention monkey patch for cerebras btlm model\n\n\nmonkeypatch.llama_patch_multipack\nPatched LlamaAttention to use torch.nn.functional.scaled_dot_product_attention\n\n\nmonkeypatch.stablelm_attn_hijack_flash\nPyTorch StableLM Epoch model.\n\n\nmonkeypatch.trainer_fsdp_optim\nfix for FSDP optimizer save in trainer w 4.47.0\n\n\nmonkeypatch.transformers_fa_utils\nsee https://github.com/huggingface/transformers/pull/35834\n\n\nmonkeypatch.unsloth_\nmodule for patching with unsloth optimizations\n\n\nmonkeypatch.attention.mllama\nMonkeypatch for Vision Llama for FA2 support\n\n\nmonkeypatch.data.batch_dataset_fetcher\nmonkey patches for the dataset fetcher to handle batches of packed indexes\n\n\nmonkeypatch.mixtral\nPatches to support multipack for mixtral\n\n\nmonkeypatch.gradient_checkpointing.offload_cpu\nCPU offloaded checkpointing\n\n\nmonkeypatch.gradient_checkpointing.offload_disk\nDISCO - DIsk-based Storage and Checkpointing with Optimized prefetching\n\n\n\n\n\n\nUtility functions\n\n\n\nutils.tokenization\nModule for tokenization utilities\n\n\nutils.chat_templates\nThis module provides functionality for selecting chat templates based on user choices.\n\n\nutils.lora\nmodule to get the state dict of a merged lora model\n\n\nutils.model_shard_quant\nmodule to handle loading model on cpu/meta device for FSDP\n\n\nutils.bench\nBenchmarking and measurement utilities\n\n\nutils.freeze\nmodule to freeze/unfreeze parameters by name\n\n\nutils.trainer\nModule containing the Trainer class and related functions\n\n\nutils.schedulers\nModule for custom LRScheduler class\n\n\nutils.distributed\nutility helpers for distributed checks\n\n\nutils.dict\nModule containing the DictDefault class\n\n\nutils.optimizers.adopt\nCopied from https://github.com/iShohei220/adopt\n\n\nutils.data.pretraining\ndata handling specific to pretraining\n\n\nutils.data.sft\ndata handling specific to 
SFT\n\n\nutils.quantization\nUtilities for quantization including QAT and PTQ using torchao.\n\n\n\n\n\n\nPydantic data models for Axolotl config\n\n\n\nutils.schemas.config\nModule with Pydantic models for configuration.\n\n\nutils.schemas.model\nPydantic models for model input / output, etc. configuration\n\n\nutils.schemas.training\nPydantic models for training hyperparameters\n\n\nutils.schemas.datasets\nPydantic models for datasets-related configuration\n\n\nutils.schemas.peft\nPydantic models for PEFT-related configuration\n\n\nutils.schemas.trl\nPydantic models for TRL trainer configuration\n\n\nutils.schemas.multimodal\nPydantic models for multimodal-related configuration\n\n\nutils.schemas.integrations\nPydantic models for Axolotl integrations\n\n\nutils.schemas.enums\nEnums for Axolotl input config\n\n\nutils.schemas.utils\nUtilities for Axolotl Pydantic models\n\n\n\n\n\n\nThird-party integrations and extensions\n\n\n\nintegrations.base\nBase class for all plugins.\n\n\nintegrations.cut_cross_entropy.args\nModule for handling Cut Cross Entropy input arguments.\n\n\nintegrations.grokfast.optimizer\n\n\n\nintegrations.kd.trainer\nKD trainer\n\n\nintegrations.liger.args\nModule for handling LIGER input arguments.\n\n\nintegrations.lm_eval.args\nModule for handling lm eval harness input arguments.\n\n\nintegrations.spectrum.args\nModule for handling Spectrum input arguments.\n\n\n\n\n\n\nCommon utilities and shared functionality\n\n\n\ncommon.architectures\nCommon architecture specific constants\n\n\ncommon.const\nVarious shared constants\n\n\ncommon.datasets\nDataset loading utilities.\n\n\n\n\n\n\nCustom model implementations\n\n\n\nmodels.mamba.modeling_mamba\n\n\n\n\n\n\n\nData processing utilities\n\n\n\nutils.collators.core\nbasic shared collator constants\n\n\nutils.collators.batching\nData collators for axolotl to pad labels and position_ids for packed sequences\n\n\nutils.collators.mamba\ncollators for Mamba\n\n\nutils.collators.mm_chat\nCollators 
for multi-modal chat messages and packing\n\n\nutils.samplers.multipack\nMultipack Batch Sampler - An efficient batch sampler for packing variable-length sequences\n\n\n\n\n\n\nTraining callbacks\n\n\n\nutils.callbacks.perplexity\ncallback to calculate perplexity as an evaluation metric.\n\n\nutils.callbacks.profiler\nHF Trainer callback for creating pytorch profiling snapshots\n\n\nutils.callbacks.lisa\nmodule for LISA\n\n\nutils.callbacks.mlflow_\nMLFlow module for trainer callbacks\n\n\nutils.callbacks.comet_\nComet module for trainer callbacks\n\n\nutils.callbacks.qat\nQAT Callback for HF Causal Trainer" }, { "objectID": "docs/api/index.html#core", "href": "docs/api/index.html#core", "title": "API Reference", "section": "", - "text": "Core functionality for training\n\n\n\ntrain\nPrepare and train a model on a dataset. Can also infer from a model or merge lora\n\n\nevaluate\nModule for evaluating models.\n\n\ndatasets\nModule containing Dataset functionality\n\n\nconvert\nModule containing File Reader, File Writer, Json Parser, and Jsonl Serializer classes\n\n\nprompt_tokenizers\nModule containing PromptTokenizingStrategy and Prompter classes\n\n\nlogging_config\nCommon logging module for axolotl\n\n\ncore.trainer_builder\nBuilder for the training args and trainer\n\n\ncore.training_args\nextra axolotl specific training args\n\n\ncore.chat.messages\ninternal message representations of chat messages\n\n\ncore.chat.format.chatml\nChatML transformation functions for MessageContents\n\n\ncore.chat.format.llama3x\nLlama 3.x chat formatting functions for MessageContents\n\n\ncore.chat.format.shared\nshared functions for format transforms\n\n\ncore.datasets.chat\nchat dataset module\n\n\ncore.datasets.transforms.chat_builder\nThis module contains a function that builds a transform that takes a row from the dataset and converts it to a Chat." + "text": "Core functionality for training\n\n\n\ntrain\nPrepare and train a model on a dataset. 
Can also infer from a model or merge lora\n\n\nevaluate\nModule for evaluating models.\n\n\ndatasets\nModule containing Dataset functionality\n\n\nconvert\nModule containing File Reader, File Writer, Json Parser, and Jsonl Serializer classes\n\n\nprompt_tokenizers\nModule containing PromptTokenizingStrategy and Prompter classes\n\n\nlogging_config\nCommon logging module for axolotl\n\n\ncore.builders.base\nBase class for trainer builder\n\n\ncore.builders.causal\nBuilder for causal trainers\n\n\ncore.builders.rl\nBuilder for RLHF trainers\n\n\ncore.training_args\nextra axolotl specific training args\n\n\ncore.chat.messages\ninternal message representations of chat messages\n\n\ncore.chat.format.chatml\nChatML transformation functions for MessageContents\n\n\ncore.chat.format.llama3x\nLlama 3.x chat formatting functions for MessageContents\n\n\ncore.chat.format.shared\nshared functions for format transforms\n\n\ncore.datasets.chat\nchat dataset module\n\n\ncore.datasets.transforms.chat_builder\nThis module contains a function that builds a transform that takes a row from the dataset and converts it to a Chat." }, { "objectID": "docs/api/index.html#cli", @@ -1608,389 +1629,361 @@ "text": "Name\nDescription\n\n\n\n\nargilla_chat\nfor argilla/dpo-mix-7k conversations\n\n\nicr\nchatml transforms for datasets with system, input, chosen, rejected\n\n\nintel\nFor Intel Orca DPO Pairs\n\n\nultra\nfor ultrafeedback binarized conversations\n\n\n\n\n\nprompt_strategies.dpo.chatml.argilla_chat(cfg, **kwargs)\nfor argilla/dpo-mix-7k conversations\n\n\n\nprompt_strategies.dpo.chatml.icr(cfg, **kwargs)\nchatml transforms for datasets with system, input, chosen, rejected\nex. 
https://huggingface.co/datasets/argilla/distilabel-intel-orca-dpo-pairs\n\n\n\nprompt_strategies.dpo.chatml.intel(cfg, **kwargs)\nFor Intel Orca DPO Pairs\n\n\n\nprompt_strategies.dpo.chatml.ultra(cfg, **kwargs)\nfor ultrafeedback binarized conversations" }, { - "objectID": "docs/api/core.trainer_builder.html", - "href": "docs/api/core.trainer_builder.html", - "title": "core.trainer_builder", + "objectID": "docs/api/models.mamba.modeling_mamba.html", + "href": "docs/api/models.mamba.modeling_mamba.html", + "title": "models.mamba.modeling_mamba", "section": "", - "text": "core.trainer_builder\nBuilder for the training args and trainer\n\n\n\n\n\nName\nDescription\n\n\n\n\nHFCausalTrainerBuilder\nBuild the HuggingFace training args/trainer for causal models and reward modeling\n\n\nHFPPOTrainerBuilder\nHF Factory class for PPO Trainer\n\n\nHFRLTrainerBuilder\nTrainer factory class for TRL-based RLHF trainers (e.g. DPO)\n\n\nTrainerBuilderBase\nBase class for trainer builder.\n\n\n\n\n\ncore.trainer_builder.HFCausalTrainerBuilder(\n self,\n cfg,\n model,\n tokenizer,\n processor=None,\n)\nBuild the HuggingFace training args/trainer for causal models and reward modeling\nusing TRL.\n\n\n\ncore.trainer_builder.HFPPOTrainerBuilder(\n self,\n cfg,\n model,\n tokenizer,\n processor=None,\n)\nHF Factory class for PPO Trainer\n\n\n\ncore.trainer_builder.HFRLTrainerBuilder(\n self,\n cfg,\n model,\n tokenizer,\n processor=None,\n)\nTrainer factory class for TRL-based RLHF trainers (e.g. 
DPO)\n\n\n\ncore.trainer_builder.TrainerBuilderBase(\n self,\n cfg,\n model,\n tokenizer,\n processor=None,\n)\nBase class for trainer builder.\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_post_trainer_create_callbacks\nCallbacks added after the trainer is created, usually b/c these need access to the trainer\n\n\n\n\n\ncore.trainer_builder.TrainerBuilderBase.get_post_trainer_create_callbacks(\n trainer,\n)\nCallbacks added after the trainer is created, usually b/c these need access to the trainer" + "text": "models.mamba.modeling_mamba\nmodels.mamba.modeling_mamba" }, { - "objectID": "docs/api/core.trainer_builder.html#classes", - "href": "docs/api/core.trainer_builder.html#classes", - "title": "core.trainer_builder", + "objectID": "docs/api/prompt_strategies.dpo.chat_template.html", + "href": "docs/api/prompt_strategies.dpo.chat_template.html", + "title": "prompt_strategies.dpo.chat_template", "section": "", - "text": "Name\nDescription\n\n\n\n\nHFCausalTrainerBuilder\nBuild the HuggingFace training args/trainer for causal models and reward modeling\n\n\nHFPPOTrainerBuilder\nHF Factory class for PPO Trainer\n\n\nHFRLTrainerBuilder\nTrainer factory class for TRL-based RLHF trainers (e.g. DPO)\n\n\nTrainerBuilderBase\nBase class for trainer builder.\n\n\n\n\n\ncore.trainer_builder.HFCausalTrainerBuilder(\n self,\n cfg,\n model,\n tokenizer,\n processor=None,\n)\nBuild the HuggingFace training args/trainer for causal models and reward modeling\nusing TRL.\n\n\n\ncore.trainer_builder.HFPPOTrainerBuilder(\n self,\n cfg,\n model,\n tokenizer,\n processor=None,\n)\nHF Factory class for PPO Trainer\n\n\n\ncore.trainer_builder.HFRLTrainerBuilder(\n self,\n cfg,\n model,\n tokenizer,\n processor=None,\n)\nTrainer factory class for TRL-based RLHF trainers (e.g. 
DPO)\n\n\n\ncore.trainer_builder.TrainerBuilderBase(\n self,\n cfg,\n model,\n tokenizer,\n processor=None,\n)\nBase class for trainer builder.\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_post_trainer_create_callbacks\nCallbacks added after the trainer is created, usually b/c these need access to the trainer\n\n\n\n\n\ncore.trainer_builder.TrainerBuilderBase.get_post_trainer_create_callbacks(\n trainer,\n)\nCallbacks added after the trainer is created, usually b/c these need access to the trainer" + "text": "prompt_strategies.dpo.chat_template\nprompt_strategies.dpo.chat_template\nDPO prompt strategies for using tokenizer chat templates." }, { - "objectID": "docs/api/train.html", - "href": "docs/api/train.html", - "title": "train", + "objectID": "docs/api/monkeypatch.llama_attn_hijack_xformers.html", + "href": "docs/api/monkeypatch.llama_attn_hijack_xformers.html", + "title": "monkeypatch.llama_attn_hijack_xformers", "section": "", - "text": "train\nPrepare and train a model on a dataset. Can also infer from a model or merge lora\n\n\n\n\n\nName\nDescription\n\n\n\n\ncreate_model_card\nCreate a model card for the trained model if needed.\n\n\ndetermine_resume_checkpoint\nDetermine the checkpoint to resume from based on configuration.\n\n\nexecute_training\nExecute the training process with appropriate SDP kernel configurations.\n\n\nhandle_untrained_tokens_fix\nApply fixes for untrained tokens if configured.\n\n\nsave_initial_configs\nSave initial configurations before training.\n\n\nsave_trained_model\nSave the trained model according to configuration and training setup.\n\n\nsetup_model_and_tokenizer\nLoad the tokenizer, processor (for multimodal models), and model based on configuration.\n\n\nsetup_model_and_trainer\nLoad model, tokenizer, trainer, etc. 
Helper function to encapsulate the full\n\n\nsetup_model_card\nSet up the Axolotl badge and add the Axolotl config to the model card if available.\n\n\nsetup_reference_model\nSet up the reference model for RL training if needed.\n\n\nsetup_signal_handler\nSet up signal handler for graceful termination.\n\n\ntrain\nTrain a model on the given dataset.\n\n\n\n\n\ntrain.create_model_card(cfg, trainer)\nCreate a model card for the trained model if needed.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object with model card creation capabilities.\nrequired\n\n\n\n\n\n\n\ntrain.determine_resume_checkpoint(cfg)\nDetermine the checkpoint to resume from based on configuration.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nstr | None\nPath to the checkpoint to resume from, or None if not resuming.\n\n\n\n\n\n\n\ntrain.execute_training(cfg, trainer, resume_from_checkpoint)\nExecute the training process with appropriate SDP kernel configurations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntrainer\nAny\nThe configured trainer object.\nrequired\n\n\nresume_from_checkpoint\nstr | None\nPath to checkpoint to resume from, if applicable.\nrequired\n\n\n\n\n\n\n\ntrain.handle_untrained_tokens_fix(\n cfg,\n model,\n tokenizer,\n train_dataset,\n safe_serialization,\n)\nApply fixes for untrained tokens if configured.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\nmodel\nPreTrainedModel\nThe model to apply fixes to.\nrequired\n\n\ntokenizer\nPreTrainedTokenizer\nThe tokenizer for token 
identification.\nrequired\n\n\ntrain_dataset\nDataset\nThe training dataset to use.\nrequired\n\n\nsafe_serialization\nbool\nWhether to use safe serialization when saving.\nrequired\n\n\n\n\n\n\n\ntrain.save_initial_configs(cfg, tokenizer, model, peft_config, processor)\nSave initial configurations before training.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntokenizer\nPreTrainedTokenizer\nThe tokenizer to save.\nrequired\n\n\nmodel\nPreTrainedModel\nThe model to save configuration for.\nrequired\n\n\npeft_config\nPeftConfig | None\nThe PEFT configuration to save if applicable.\nrequired\n\n\n\n\n\n\n\ntrain.save_trained_model(cfg, trainer, model, safe_serialization)\nSave the trained model according to configuration and training setup.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntrainer\nAny\nThe trainer object.\nrequired\n\n\nmodel\nPreTrainedModel\nThe trained model to save.\nrequired\n\n\nsafe_serialization\nbool\nWhether to use safe serialization.\nrequired\n\n\n\n\n\n\n\ntrain.setup_model_and_tokenizer(cfg)\nLoad the tokenizer, processor (for multimodal models), and model based on configuration.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[PreTrainedModel, PreTrainedTokenizer, PeftConfig | None, ProcessorMixin | None]\nTuple containing model, tokenizer, peft_config (if LoRA / QLoRA, else None), and processor (if multimodal, else None).\n\n\n\n\n\n\n\ntrain.setup_model_and_trainer(cfg, dataset_meta)\nLoad model, tokenizer, trainer, etc. 
Helper function to encapsulate the full\ntrainer setup.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration dictionary with training parameters.\nrequired\n\n\ndataset_meta\nTrainDatasetMeta\nObject with training, validation datasets and metadata.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[HFRLTrainerBuilder | HFCausalTrainerBuilder, PeftModel | PreTrainedModel, PreTrainedTokenizer, PeftConfig | None, ProcessorMixin | None]\nTuple of: - Trainer (Causal or RLHF) - Model - Tokenizer - PEFT config - Processor\n\n\n\n\n\n\n\ntrain.setup_model_card(cfg)\nSet up the Axolotl badge and add the Axolotl config to the model card if available.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\ntrain.setup_reference_model(cfg, tokenizer)\nSet up the reference model for RL training if needed.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntokenizer\nPreTrainedTokenizer\nThe tokenizer to use for the reference model.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nPreTrainedModel | None\nReference model if needed for RL training, None otherwise.\n\n\n\n\n\n\n\ntrain.setup_signal_handler(cfg, model, safe_serialization)\nSet up signal handler for graceful termination.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\nmodel\nPreTrainedModel\nThe model to save on termination\nrequired\n\n\nsafe_serialization\nbool\nWhether to use safe serialization when saving\nrequired\n\n\n\n\n\n\n\ntrain.train(cfg, dataset_meta)\nTrain a model on the given dataset.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration dictionary with training 
parameters\nrequired\n\n\ndataset_meta\nTrainDatasetMeta\nObject with training, validation datasets and metadata\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[PeftModel | PreTrainedModel, PreTrainedTokenizer, Trainer]\nTuple of (model, tokenizer) after training" + "text": "monkeypatch.llama_attn_hijack_xformers\nmonkeypatch.llama_attn_hijack_xformers\nDirectly copied the code from https://raw.githubusercontent.com/oobabooga/text-generation-webui/main/modules/llama_attn_hijack.py and made some adjustments" }, { - "objectID": "docs/api/train.html#functions", - "href": "docs/api/train.html#functions", - "title": "train", + "objectID": "docs/api/cli.args.html", + "href": "docs/api/cli.args.html", + "title": "cli.args", "section": "", - "text": "Name\nDescription\n\n\n\n\ncreate_model_card\nCreate a model card for the trained model if needed.\n\n\ndetermine_resume_checkpoint\nDetermine the checkpoint to resume from based on configuration.\n\n\nexecute_training\nExecute the training process with appropriate SDP kernel configurations.\n\n\nhandle_untrained_tokens_fix\nApply fixes for untrained tokens if configured.\n\n\nsave_initial_configs\nSave initial configurations before training.\n\n\nsave_trained_model\nSave the trained model according to configuration and training setup.\n\n\nsetup_model_and_tokenizer\nLoad the tokenizer, processor (for multimodal models), and model based on configuration.\n\n\nsetup_model_and_trainer\nLoad model, tokenizer, trainer, etc. 
Helper function to encapsulate the full\n\n\nsetup_model_card\nSet up the Axolotl badge and add the Axolotl config to the model card if available.\n\n\nsetup_reference_model\nSet up the reference model for RL training if needed.\n\n\nsetup_signal_handler\nSet up signal handler for graceful termination.\n\n\ntrain\nTrain a model on the given dataset.\n\n\n\n\n\ntrain.create_model_card(cfg, trainer)\nCreate a model card for the trained model if needed.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object with model card creation capabilities.\nrequired\n\n\n\n\n\n\n\ntrain.determine_resume_checkpoint(cfg)\nDetermine the checkpoint to resume from based on configuration.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nstr | None\nPath to the checkpoint to resume from, or None if not resuming.\n\n\n\n\n\n\n\ntrain.execute_training(cfg, trainer, resume_from_checkpoint)\nExecute the training process with appropriate SDP kernel configurations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntrainer\nAny\nThe configured trainer object.\nrequired\n\n\nresume_from_checkpoint\nstr | None\nPath to checkpoint to resume from, if applicable.\nrequired\n\n\n\n\n\n\n\ntrain.handle_untrained_tokens_fix(\n cfg,\n model,\n tokenizer,\n train_dataset,\n safe_serialization,\n)\nApply fixes for untrained tokens if configured.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\nmodel\nPreTrainedModel\nThe model to apply fixes to.\nrequired\n\n\ntokenizer\nPreTrainedTokenizer\nThe tokenizer for token 
identification.\nrequired\n\n\ntrain_dataset\nDataset\nThe training dataset to use.\nrequired\n\n\nsafe_serialization\nbool\nWhether to use safe serialization when saving.\nrequired\n\n\n\n\n\n\n\ntrain.save_initial_configs(cfg, tokenizer, model, peft_config, processor)\nSave initial configurations before training.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntokenizer\nPreTrainedTokenizer\nThe tokenizer to save.\nrequired\n\n\nmodel\nPreTrainedModel\nThe model to save configuration for.\nrequired\n\n\npeft_config\nPeftConfig | None\nThe PEFT configuration to save if applicable.\nrequired\n\n\n\n\n\n\n\ntrain.save_trained_model(cfg, trainer, model, safe_serialization)\nSave the trained model according to configuration and training setup.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntrainer\nAny\nThe trainer object.\nrequired\n\n\nmodel\nPreTrainedModel\nThe trained model to save.\nrequired\n\n\nsafe_serialization\nbool\nWhether to use safe serialization.\nrequired\n\n\n\n\n\n\n\ntrain.setup_model_and_tokenizer(cfg)\nLoad the tokenizer, processor (for multimodal models), and model based on configuration.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[PreTrainedModel, PreTrainedTokenizer, PeftConfig | None, ProcessorMixin | None]\nTuple containing model, tokenizer, peft_config (if LoRA / QLoRA, else None), and processor (if multimodal, else None).\n\n\n\n\n\n\n\ntrain.setup_model_and_trainer(cfg, dataset_meta)\nLoad model, tokenizer, trainer, etc. 
Helper function to encapsulate the full\ntrainer setup.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration dictionary with training parameters.\nrequired\n\n\ndataset_meta\nTrainDatasetMeta\nObject with training, validation datasets and metadata.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[HFRLTrainerBuilder | HFCausalTrainerBuilder, PeftModel | PreTrainedModel, PreTrainedTokenizer, PeftConfig | None, ProcessorMixin | None]\nTuple of: - Trainer (Causal or RLHF) - Model - Tokenizer - PEFT config - Processor\n\n\n\n\n\n\n\ntrain.setup_model_card(cfg)\nSet up the Axolotl badge and add the Axolotl config to the model card if available.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\ntrain.setup_reference_model(cfg, tokenizer)\nSet up the reference model for RL training if needed.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntokenizer\nPreTrainedTokenizer\nThe tokenizer to use for the reference model.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nPreTrainedModel | None\nReference model if needed for RL training, None otherwise.\n\n\n\n\n\n\n\ntrain.setup_signal_handler(cfg, model, safe_serialization)\nSet up signal handler for graceful termination.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\nmodel\nPreTrainedModel\nThe model to save on termination\nrequired\n\n\nsafe_serialization\nbool\nWhether to use safe serialization when saving\nrequired\n\n\n\n\n\n\n\ntrain.train(cfg, dataset_meta)\nTrain a model on the given dataset.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration dictionary with training 
parameters\nrequired\n\n\ndataset_meta\nTrainDatasetMeta\nObject with training, validation datasets and metadata\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[PeftModel | PreTrainedModel, PreTrainedTokenizer, Trainer]\nTuple of (model, tokenizer) after training" + "text": "cli.args\nModule for axolotl CLI command arguments.\n\n\n\n\n\nName\nDescription\n\n\n\n\nEvaluateCliArgs\nDataclass with CLI arguments for axolotl evaluate command.\n\n\nInferenceCliArgs\nDataclass with CLI arguments for axolotl inference command.\n\n\nPreprocessCliArgs\nDataclass with CLI arguments for axolotl preprocess command.\n\n\nQuantizeCliArgs\nDataclass with CLI arguments for axolotl quantize command.\n\n\nTrainerCliArgs\nDataclass with CLI arguments for axolotl train command.\n\n\nVllmServeCliArgs\nDataclass with CLI arguments for axolotl vllm-serve command.\n\n\n\n\n\ncli.args.EvaluateCliArgs(\n debug=False,\n debug_text_only=False,\n debug_num_examples=0,\n)\nDataclass with CLI arguments for axolotl evaluate command.\n\n\n\ncli.args.InferenceCliArgs(prompter=None)\nDataclass with CLI arguments for axolotl inference command.\n\n\n\ncli.args.PreprocessCliArgs(\n debug=False,\n debug_text_only=False,\n debug_num_examples=1,\n prompter=None,\n download=True,\n iterable=None,\n)\nDataclass with CLI arguments for axolotl preprocess command.\n\n\n\ncli.args.QuantizeCliArgs(\n base_model=None,\n weight_dtype=None,\n activation_dtype=None,\n quantize_embedding=None,\n group_size=None,\n output_dir=None,\n)\nDataclass with CLI arguments for axolotl quantize command.\n\n\n\ncli.args.TrainerCliArgs(\n debug=False,\n debug_text_only=False,\n debug_num_examples=0,\n merge_lora=False,\n prompter=None,\n shard=False,\n main_process_port=None,\n num_processes=None,\n)\nDataclass with CLI arguments for axolotl train command.\n\n\n\ncli.args.VllmServeCliArgs(\n tensor_parallel_size=None,\n host=None,\n port=None,\n gpu_memory_utilization=None,\n dtype=None,\n 
max_model_len=None,\n enable_prefix_caching=None,\n serve_module=None,\n)\nDataclass with CLI arguments for axolotl vllm-serve command." }, { - "objectID": "docs/api/utils.callbacks.qat.html", - "href": "docs/api/utils.callbacks.qat.html", - "title": "utils.callbacks.qat", + "objectID": "docs/api/cli.args.html#classes", + "href": "docs/api/cli.args.html#classes", + "title": "cli.args", "section": "", - "text": "utils.callbacks.qat\nQAT Callback for HF Causal Trainer\n\n\n\n\n\nName\nDescription\n\n\n\n\nQATCallback\nCallback to toggle fake quantization for the model.\n\n\n\n\n\nutils.callbacks.qat.QATCallback(self, cfg)\nCallback to toggle fake quantization for the model.\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\ntoggle_fake_quant\nToggle fake quantization for any fake quantized linear or embedding layers in the model.\n\n\n\n\n\nutils.callbacks.qat.toggle_fake_quant(mod, enable)\nToggle fake quantization for any fake quantized linear or embedding layers in the model.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nmod\nnn.Module\nThe module to toggle fake quantization for.\nrequired\n\n\nenable\nbool\nWhether to enable or disable fake quantization.\nrequired" + "text": "Name\nDescription\n\n\n\n\nEvaluateCliArgs\nDataclass with CLI arguments for axolotl evaluate command.\n\n\nInferenceCliArgs\nDataclass with CLI arguments for axolotl inference command.\n\n\nPreprocessCliArgs\nDataclass with CLI arguments for axolotl preprocess command.\n\n\nQuantizeCliArgs\nDataclass with CLI arguments for axolotl quantize command.\n\n\nTrainerCliArgs\nDataclass with CLI arguments for axolotl train command.\n\n\nVllmServeCliArgs\nDataclass with CLI arguments for axolotl vllm-serve command.\n\n\n\n\n\ncli.args.EvaluateCliArgs(\n debug=False,\n debug_text_only=False,\n debug_num_examples=0,\n)\nDataclass with CLI arguments for axolotl evaluate command.\n\n\n\ncli.args.InferenceCliArgs(prompter=None)\nDataclass with CLI arguments for axolotl inference 
command.\n\n\n\ncli.args.PreprocessCliArgs(\n debug=False,\n debug_text_only=False,\n debug_num_examples=1,\n prompter=None,\n download=True,\n iterable=None,\n)\nDataclass with CLI arguments for axolotl preprocess command.\n\n\n\ncli.args.QuantizeCliArgs(\n base_model=None,\n weight_dtype=None,\n activation_dtype=None,\n quantize_embedding=None,\n group_size=None,\n output_dir=None,\n)\nDataclass with CLI arguments for axolotl quantize command.\n\n\n\ncli.args.TrainerCliArgs(\n debug=False,\n debug_text_only=False,\n debug_num_examples=0,\n merge_lora=False,\n prompter=None,\n shard=False,\n main_process_port=None,\n num_processes=None,\n)\nDataclass with CLI arguments for axolotl train command.\n\n\n\ncli.args.VllmServeCliArgs(\n tensor_parallel_size=None,\n host=None,\n port=None,\n gpu_memory_utilization=None,\n dtype=None,\n max_model_len=None,\n enable_prefix_caching=None,\n serve_module=None,\n)\nDataclass with CLI arguments for axolotl vllm-serve command." }, { - "objectID": "docs/api/utils.callbacks.qat.html#classes", - "href": "docs/api/utils.callbacks.qat.html#classes", - "title": "utils.callbacks.qat", + "objectID": "docs/api/prompt_strategies.dpo.llama3.html", + "href": "docs/api/prompt_strategies.dpo.llama3.html", + "title": "prompt_strategies.dpo.llama3", "section": "", - "text": "Name\nDescription\n\n\n\n\nQATCallback\nCallback to toggle fake quantization for the model.\n\n\n\n\n\nutils.callbacks.qat.QATCallback(self, cfg)\nCallback to toggle fake quantization for the model." 
+ "text": "prompt_strategies.dpo.llama3\nDPO strategies for llama-3 chat template\n\n\n\n\n\nName\nDescription\n\n\n\n\nargilla_chat\nfor argilla/dpo-mix-7k conversations\n\n\nicr\nchatml transforms for datasets with system, input, chosen, rejected\n\n\nintel\nFor Intel Orca DPO Pairs\n\n\nultra\nfor ultrafeedback binarized conversations\n\n\n\n\n\nprompt_strategies.dpo.llama3.argilla_chat(cfg, **kwargs)\nfor argilla/dpo-mix-7k conversations\n\n\n\nprompt_strategies.dpo.llama3.icr(cfg, **kwargs)\nchatml transforms for datasets with system, input, chosen, rejected\nex. https://huggingface.co/datasets/argilla/distilabel-intel-orca-dpo-pairs\n\n\n\nprompt_strategies.dpo.llama3.intel(cfg, **kwargs)\nFor Intel Orca DPO Pairs\n\n\n\nprompt_strategies.dpo.llama3.ultra(cfg, **kwargs)\nfor ultrafeedback binarized conversations" }, { - "objectID": "docs/api/utils.callbacks.qat.html#functions", - "href": "docs/api/utils.callbacks.qat.html#functions", - "title": "utils.callbacks.qat", + "objectID": "docs/api/prompt_strategies.dpo.llama3.html#functions", + "href": "docs/api/prompt_strategies.dpo.llama3.html#functions", + "title": "prompt_strategies.dpo.llama3", "section": "", - "text": "Name\nDescription\n\n\n\n\ntoggle_fake_quant\nToggle fake quantization for any fake quantized linear or embedding layers in the model.\n\n\n\n\n\nutils.callbacks.qat.toggle_fake_quant(mod, enable)\nToggle fake quantization for any fake quantized linear or embedding layers in the model.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nmod\nnn.Module\nThe module to toggle fake quantization for.\nrequired\n\n\nenable\nbool\nWhether to enable or disable fake quantization.\nrequired" + "text": "Name\nDescription\n\n\n\n\nargilla_chat\nfor argilla/dpo-mix-7k conversations\n\n\nicr\nchatml transforms for datasets with system, input, chosen, rejected\n\n\nintel\nFor Intel Orca DPO Pairs\n\n\nultra\nfor ultrafeedback binarized 
conversations\n\n\n\n\n\nprompt_strategies.dpo.llama3.argilla_chat(cfg, **kwargs)\nfor argilla/dpo-mix-7k conversations\n\n\n\nprompt_strategies.dpo.llama3.icr(cfg, **kwargs)\nchatml transforms for datasets with system, input, chosen, rejected\nex. https://huggingface.co/datasets/argilla/distilabel-intel-orca-dpo-pairs\n\n\n\nprompt_strategies.dpo.llama3.intel(cfg, **kwargs)\nFor Intel Orca DPO Pairs\n\n\n\nprompt_strategies.dpo.llama3.ultra(cfg, **kwargs)\nfor ultrafeedback binarized conversations" }, { - "objectID": "docs/api/monkeypatch.stablelm_attn_hijack_flash.html", - "href": "docs/api/monkeypatch.stablelm_attn_hijack_flash.html", - "title": "monkeypatch.stablelm_attn_hijack_flash", + "objectID": "docs/api/utils.schemas.datasets.html", + "href": "docs/api/utils.schemas.datasets.html", + "title": "utils.schemas.datasets", "section": "", - "text": "monkeypatch.stablelm_attn_hijack_flash\nPyTorch StableLM Epoch model.\n\n\n\n\n\nName\nDescription\n\n\n\n\nrepeat_kv\nThis is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,\n\n\nrotate_half\nRotates half the hidden dims of the input.\n\n\n\n\n\nmonkeypatch.stablelm_attn_hijack_flash.repeat_kv(hidden_states, n_rep)\nThis is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,\nnum_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)\n\n\n\nmonkeypatch.stablelm_attn_hijack_flash.rotate_half(x)\nRotates half the hidden dims of the input." 
+ "text": "utils.schemas.datasets\nPydantic models for datasets-related configuration\n\n\n\n\n\nName\nDescription\n\n\n\n\nDPODataset\nDPO configuration subset\n\n\nKTODataset\nKTO configuration subset\n\n\nPretrainingDataset\nPretraining dataset configuration subset\n\n\nSFTDataset\nSFT configuration subset\n\n\nStepwiseSupervisedDataset\nStepwise supervised dataset configuration subset\n\n\nUserDefinedDPOType\nUser defined typing for DPO\n\n\nUserDefinedKTOType\nUser defined typing for KTO\n\n\nUserDefinedPrompterType\nStructure for user defined prompt types\n\n\n\n\n\nutils.schemas.datasets.DPODataset()\nDPO configuration subset\n\n\n\nutils.schemas.datasets.KTODataset()\nKTO configuration subset\n\n\n\nutils.schemas.datasets.PretrainingDataset()\nPretraining dataset configuration subset\n\n\n\nutils.schemas.datasets.SFTDataset()\nSFT configuration subset\n\n\n\n\n\nName\nDescription\n\n\n\n\nhandle_legacy_message_fields\nHandle backwards compatibility between legacy message field mapping and new property mapping system.\n\n\n\n\n\nutils.schemas.datasets.SFTDataset.handle_legacy_message_fields(data)\nHandle backwards compatibility between legacy message field mapping and new property mapping system.\n\n\n\n\n\nutils.schemas.datasets.StepwiseSupervisedDataset()\nStepwise supervised dataset configuration subset\n\n\n\nutils.schemas.datasets.UserDefinedDPOType()\nUser defined typing for DPO\n\n\n\nutils.schemas.datasets.UserDefinedKTOType()\nUser defined typing for KTO\n\n\n\nutils.schemas.datasets.UserDefinedPrompterType()\nStructure for user defined prompt types" }, { - "objectID": "docs/api/monkeypatch.stablelm_attn_hijack_flash.html#functions", - "href": "docs/api/monkeypatch.stablelm_attn_hijack_flash.html#functions", - "title": "monkeypatch.stablelm_attn_hijack_flash", + "objectID": "docs/api/utils.schemas.datasets.html#classes", + "href": "docs/api/utils.schemas.datasets.html#classes", + "title": "utils.schemas.datasets", "section": "", - "text": 
"Name\nDescription\n\n\n\n\nrepeat_kv\nThis is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,\n\n\nrotate_half\nRotates half the hidden dims of the input.\n\n\n\n\n\nmonkeypatch.stablelm_attn_hijack_flash.repeat_kv(hidden_states, n_rep)\nThis is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,\nnum_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)\n\n\n\nmonkeypatch.stablelm_attn_hijack_flash.rotate_half(x)\nRotates half the hidden dims of the input." + "text": "Name\nDescription\n\n\n\n\nDPODataset\nDPO configuration subset\n\n\nKTODataset\nKTO configuration subset\n\n\nPretrainingDataset\nPretraining dataset configuration subset\n\n\nSFTDataset\nSFT configuration subset\n\n\nStepwiseSupervisedDataset\nStepwise supervised dataset configuration subset\n\n\nUserDefinedDPOType\nUser defined typing for DPO\n\n\nUserDefinedKTOType\nUser defined typing for KTO\n\n\nUserDefinedPrompterType\nStructure for user defined prompt types\n\n\n\n\n\nutils.schemas.datasets.DPODataset()\nDPO configuration subset\n\n\n\nutils.schemas.datasets.KTODataset()\nKTO configuration subset\n\n\n\nutils.schemas.datasets.PretrainingDataset()\nPretraining dataset configuration subset\n\n\n\nutils.schemas.datasets.SFTDataset()\nSFT configuration subset\n\n\n\n\n\nName\nDescription\n\n\n\n\nhandle_legacy_message_fields\nHandle backwards compatibility between legacy message field mapping and new property mapping system.\n\n\n\n\n\nutils.schemas.datasets.SFTDataset.handle_legacy_message_fields(data)\nHandle backwards compatibility between legacy message field mapping and new property mapping system.\n\n\n\n\n\nutils.schemas.datasets.StepwiseSupervisedDataset()\nStepwise supervised dataset configuration subset\n\n\n\nutils.schemas.datasets.UserDefinedDPOType()\nUser defined typing for DPO\n\n\n\nutils.schemas.datasets.UserDefinedKTOType()\nUser defined typing 
for KTO\n\n\n\nutils.schemas.datasets.UserDefinedPrompterType()\nStructure for user defined prompt types" }, { - "objectID": "docs/api/utils.schemas.enums.html", - "href": "docs/api/utils.schemas.enums.html", - "title": "utils.schemas.enums", + "objectID": "docs/api/convert.html", + "href": "docs/api/convert.html", + "title": "convert", "section": "", - "text": "utils.schemas.enums\nEnums for Axolotl input config\n\n\n\n\n\nName\nDescription\n\n\n\n\nChatTemplate\nChat templates configuration subset\n\n\nCustomSupportedOptimizers\nCustom supported optimizers\n\n\nRLType\nRL trainer type configuration subset\n\n\nRingAttnFunc\nEnum class for supported ring-flash-attn implementations\n\n\nTorchIntDType\nTorch integer data types - getattr guards against torch < 2.6 which does not support int4\n\n\n\n\n\nutils.schemas.enums.ChatTemplate()\nChat templates configuration subset\n\n\n\nutils.schemas.enums.CustomSupportedOptimizers()\nCustom supported optimizers\n\n\n\nutils.schemas.enums.RLType()\nRL trainer type configuration subset\n\n\n\nutils.schemas.enums.RingAttnFunc()\nEnum class for supported ring-flash-attn implementations\n\n\n\nutils.schemas.enums.TorchIntDType()\nTorch integer data types - getattr guards against torch < 2.6 which does not support int4" + "text": "convert\nModule containing File Reader, File Writer, Json Parser, and Jsonl Serializer classes\n\n\n\n\n\nName\nDescription\n\n\n\n\nFileReader\nReads a file and returns its contents as a string\n\n\nFileWriter\nWrites a string to a file\n\n\nJsonParser\nParses a string as JSON and returns the result\n\n\nJsonToJsonlConverter\nConverts a JSON file to JSONL\n\n\nJsonlSerializer\nSerializes a list of JSON objects into a JSONL string\n\n\nStdoutWriter\nWrites a string to stdout\n\n\n\n\n\nconvert.FileReader()\nReads a file and returns its contents as a string\n\n\n\nconvert.FileWriter(file_path)\nWrites a string to a file\n\n\n\nconvert.JsonParser()\nParses a string as JSON and returns the 
result\n\n\n\nconvert.JsonToJsonlConverter(\n file_reader,\n file_writer,\n json_parser,\n jsonl_serializer,\n)\nConverts a JSON file to JSONL\n\n\n\nconvert.JsonlSerializer()\nSerializes a list of JSON objects into a JSONL string\n\n\n\nconvert.StdoutWriter()\nWrites a string to stdout" }, { - "objectID": "docs/api/utils.schemas.enums.html#classes", - "href": "docs/api/utils.schemas.enums.html#classes", - "title": "utils.schemas.enums", + "objectID": "docs/api/convert.html#classes", + "href": "docs/api/convert.html#classes", + "title": "convert", "section": "", - "text": "Name\nDescription\n\n\n\n\nChatTemplate\nChat templates configuration subset\n\n\nCustomSupportedOptimizers\nCustom supported optimizers\n\n\nRLType\nRL trainer type configuration subset\n\n\nRingAttnFunc\nEnum class for supported ring-flash-attn implementations\n\n\nTorchIntDType\nTorch integer data types - getattr guards against torch < 2.6 which does not support int4\n\n\n\n\n\nutils.schemas.enums.ChatTemplate()\nChat templates configuration subset\n\n\n\nutils.schemas.enums.CustomSupportedOptimizers()\nCustom supported optimizers\n\n\n\nutils.schemas.enums.RLType()\nRL trainer type configuration subset\n\n\n\nutils.schemas.enums.RingAttnFunc()\nEnum class for supported ring-flash-attn implementations\n\n\n\nutils.schemas.enums.TorchIntDType()\nTorch integer data types - getattr guards against torch < 2.6 which does not support int4" + "text": "Name\nDescription\n\n\n\n\nFileReader\nReads a file and returns its contents as a string\n\n\nFileWriter\nWrites a string to a file\n\n\nJsonParser\nParses a string as JSON and returns the result\n\n\nJsonToJsonlConverter\nConverts a JSON file to JSONL\n\n\nJsonlSerializer\nSerializes a list of JSON objects into a JSONL string\n\n\nStdoutWriter\nWrites a string to stdout\n\n\n\n\n\nconvert.FileReader()\nReads a file and returns its contents as a string\n\n\n\nconvert.FileWriter(file_path)\nWrites a string to a file\n\n\n\nconvert.JsonParser()\nParses a 
string as JSON and returns the result\n\n\n\nconvert.JsonToJsonlConverter(\n file_reader,\n file_writer,\n json_parser,\n jsonl_serializer,\n)\nConverts a JSON file to JSONL\n\n\n\nconvert.JsonlSerializer()\nSerializes a list of JSON objects into a JSONL string\n\n\n\nconvert.StdoutWriter()\nWrites a string to stdout" }, { - "objectID": "docs/api/loaders.constants.html", - "href": "docs/api/loaders.constants.html", - "title": "loaders.constants", + "objectID": "docs/api/prompt_strategies.llama2_chat.html", + "href": "docs/api/prompt_strategies.llama2_chat.html", + "title": "prompt_strategies.llama2_chat", "section": "", - "text": "loaders.constants\nloaders.constants\nShared constants for axolotl.loaders module" + "text": "prompt_strategies.llama2_chat\nPrompt Strategy for finetuning Llama2 chat models\nsee also https://github.com/facebookresearch/llama/blob/6c7fe276574e78057f917549435a2554000a876d/llama/generation.py#L213 for ma reference implementation.\nThis implementation is based on the Vicuna PR and the fastchat repo, see also:\nhttps://github.com/lm-sys/FastChat/blob/cdd7730686cb1bf9ae2b768ee171bdf7d1ff04f3/fastchat/conversation.py#L847\nUse dataset type: “llama2_chat” in conig.yml to use this prompt style.\nE.g. in the config.yml:\ndatasets:\n - path: llama_finetune_train.jsonl\n type: llama2_chat\nThe dataset itself should look like this:\n{'conversations':[{\"from\": \"human\", \"value\": \"Who are you?\"}, {\"from\": \"gpt\", \"value\": \"I am Vicuna\"},...]}\nin a jsonl file. 
The first message should be from the human, the second from gpt.\nFor a custom system message, the first “from” can be “system” (followed by alternating “human” and “gpt” turns).\nImportant: Don’t use “special_tokens:” in your config.yml if you are not sure what you are doing!\n\n\n\n\n\nName\nDescription\n\n\n\n\nLLama2ChatTokenizingStrategy\nTokenizing strategy for Llama2 prompts.\n\n\nLlama2ChatConversation\nA class that manages prompt templates and keeps all conversation history.\n\n\nLlama2ChatPrompter\nA prompter that generates prompts for Llama2 models.\n\n\n\n\n\nprompt_strategies.llama2_chat.LLama2ChatTokenizingStrategy(*args, **kwargs)\nTokenizing strategy for Llama2 prompts.\nadapted from https://github.com/lm-sys/FastChat/blob/main/fastchat/train/train.py\n\n\n\nprompt_strategies.llama2_chat.Llama2ChatConversation(\n name='llama2',\n system=\"[INST] <<SYS>>\\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\\n\\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. 
If you don't know the answer to a question, please don't share false information.\\n<</SYS>>\\n\\n\",\n roles=('[INST]', '[/INST]'),\n messages=list(),\n offset=0,\n)\nA class that manages prompt templates and keeps all conversation history.\ncopied from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py\n\n\n\n\n\nName\nDescription\n\n\n\n\nappend_message\nAppend a new message.\n\n\nget_prompt\nGet the prompt for generation.\n\n\n\n\n\nprompt_strategies.llama2_chat.Llama2ChatConversation.append_message(\n role,\n message,\n)\nAppend a new message.\n\n\n\nprompt_strategies.llama2_chat.Llama2ChatConversation.get_prompt()\nGet the prompt for generation.\n\n\n\n\n\nprompt_strategies.llama2_chat.Llama2ChatPrompter()\nA prompter that generates prompts for Llama2 models." }, { - "objectID": "docs/api/prompt_strategies.metharme.html", - "href": "docs/api/prompt_strategies.metharme.html", - "title": "prompt_strategies.metharme", + "objectID": "docs/api/prompt_strategies.llama2_chat.html#classes", + "href": "docs/api/prompt_strategies.llama2_chat.html#classes", + "title": "prompt_strategies.llama2_chat", "section": "", - "text": "prompt_strategies.metharme\nModule containing the MetharmenPromptTokenizingStrategy and MetharmePrompter class\n\n\n\n\n\nName\nDescription\n\n\n\n\nMetharmePromptTokenizingStrategy\nTokenizing strategy for the Metharme models\n\n\nMetharmePrompter\nPrompter for the Metharme models.\n\n\n\n\n\nprompt_strategies.metharme.MetharmePromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for the Metharme models\n\n\n\nprompt_strategies.metharme.MetharmePrompter(self, *args, **kwargs)\nPrompter for the Metharme models." 
+ "text": "Name\nDescription\n\n\n\n\nLLama2ChatTokenizingStrategy\nTokenizing strategy for Llama2 prompts.\n\n\nLlama2ChatConversation\nA class that manages prompt templates and keeps all conversation history.\n\n\nLlama2ChatPrompter\nA prompter that generates prompts for Llama2 models.\n\n\n\n\n\nprompt_strategies.llama2_chat.LLama2ChatTokenizingStrategy(*args, **kwargs)\nTokenizing strategy for Llama2 prompts.\nadapted from https://github.com/lm-sys/FastChat/blob/main/fastchat/train/train.py\n\n\n\nprompt_strategies.llama2_chat.Llama2ChatConversation(\n name='llama2',\n system=\"[INST] <<SYS>>\\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\\n\\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\\n<</SYS>>\\n\\n\",\n roles=('[INST]', '[/INST]'),\n messages=list(),\n offset=0,\n)\nA class that manages prompt templates and keeps all conversation history.\ncopied from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py\n\n\n\n\n\nName\nDescription\n\n\n\n\nappend_message\nAppend a new message.\n\n\nget_prompt\nGet the prompt for generation.\n\n\n\n\n\nprompt_strategies.llama2_chat.Llama2ChatConversation.append_message(\n role,\n message,\n)\nAppend a new message.\n\n\n\nprompt_strategies.llama2_chat.Llama2ChatConversation.get_prompt()\nGet the prompt for generation.\n\n\n\n\n\nprompt_strategies.llama2_chat.Llama2ChatPrompter()\nA prompter that generates prompts for Llama2 models." 
}, { - "objectID": "docs/api/prompt_strategies.metharme.html#classes", - "href": "docs/api/prompt_strategies.metharme.html#classes", - "title": "prompt_strategies.metharme", + "objectID": "docs/api/monkeypatch.transformers_fa_utils.html", + "href": "docs/api/monkeypatch.transformers_fa_utils.html", + "title": "monkeypatch.transformers_fa_utils", "section": "", - "text": "Name\nDescription\n\n\n\n\nMetharmePromptTokenizingStrategy\nTokenizing strategy for the Metharme models\n\n\nMetharmePrompter\nPrompter for the Metharme models.\n\n\n\n\n\nprompt_strategies.metharme.MetharmePromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for the Metharme models\n\n\n\nprompt_strategies.metharme.MetharmePrompter(self, *args, **kwargs)\nPrompter for the Metharme models." + "text": "monkeypatch.transformers_fa_utils\nsee https://github.com/huggingface/transformers/pull/35834\n\n\n\n\n\nName\nDescription\n\n\n\n\nfixed_fa_peft_integration_check\nPEFT usually casts the layer norms in float32 for training stability reasons\n\n\n\n\n\nmonkeypatch.transformers_fa_utils.fixed_fa_peft_integration_check(\n query,\n key,\n value,\n target_dtype=None,\n preferred_dtype=None,\n)\nPEFT usually casts the layer norms in float32 for training stability reasons\ntherefore the input hidden states gets silently casted in float32. 
Hence, we need\ncast them back in float16 / bfloat16 just to be sure everything works as expected.\nThis might slowdown training & inference so it is recommended to not cast the LayerNorms!\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nquery\ntorch.Tensor\nInput query states to be passed to Flash Attention API\nrequired\n\n\nkey\ntorch.Tensor\nInput key states to be passed to Flash Attention API\nrequired\n\n\nvalue\ntorch.Tensor\nInput value states to be passed to Flash Attention API\nrequired\n\n\ntarget_dtype\ntorch.dtype, optional\nThe dtype to convert the attention tensors to. Conversion can be ignored by not providing the target dtype.\nNone\n\n\npreferred_dtype\ntorch.dtype, optional\nThe preferred dtype to convert the attention tensors to regardless of the target dtype.\nNone" }, { - "objectID": "docs/api/kernels.swiglu.html", - "href": "docs/api/kernels.swiglu.html", - "title": "kernels.swiglu", + "objectID": "docs/api/monkeypatch.transformers_fa_utils.html#functions", + "href": "docs/api/monkeypatch.transformers_fa_utils.html#functions", + "title": "monkeypatch.transformers_fa_utils", "section": "", - "text": "kernels.swiglu\nModule for definition of SwiGLU Triton kernels.\nSee “GLU Variants Improve Transformer” (https://arxiv.org/abs/2002.05202).\nCredit to unsloth (https://unsloth.ai/) for inspiration for this implementation.\n\n\n\n\n\nName\nDescription\n\n\n\n\nswiglu_backward\nSwiGLU backward pass using in-place operations.\n\n\nswiglu_forward\nSwiGLU forward pass. 
Computes SwiGLU activation: x * sigmoid(x) * up, where\n\n\n\n\n\nkernels.swiglu.swiglu_backward(grad_output, gate, up)\nSwiGLU backward pass using in-place operations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ngrad_output\ntorch.Tensor\nGradient of loss with respect to output, shape [batch, seq_len, hidden_dim].\nrequired\n\n\ngate\ntorch.Tensor\nGate tensor from forward pass, shape [batch, seq_len, hidden_dim].\nrequired\n\n\nup\ntorch.Tensor\nUp-projection tensor from forward pass, shape [batch, seq_len, hidden_dim].\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[torch.Tensor, torch.Tensor, torch.Tensor]\nTuple containing: - Forward pass output (h) - Gradient with respect to gate (df) - Gradient with respect to up-projection (de)\n\n\n\n\n\n\n\nkernels.swiglu.swiglu_forward(gate, up)\nSwiGLU forward pass. Computes SwiGLU activation: x * sigmoid(x) * up, where\nx is the gate tensor.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ngate\ntorch.Tensor\nInput gate tensor of shape [batch, seq_len, hidden_dim].\nrequired\n\n\nup\ntorch.Tensor\nUp-projection tensor of shape [batch, seq_len, hidden_dim].\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntorch.Tensor\nOutput tensor of shape [batch, seq_len, hidden_dim]." + "text": "Name\nDescription\n\n\n\n\nfixed_fa_peft_integration_check\nPEFT usually casts the layer norms in float32 for training stability reasons\n\n\n\n\n\nmonkeypatch.transformers_fa_utils.fixed_fa_peft_integration_check(\n query,\n key,\n value,\n target_dtype=None,\n preferred_dtype=None,\n)\nPEFT usually casts the layer norms in float32 for training stability reasons\ntherefore the input hidden states gets silently casted in float32. 
Hence, we need\ncast them back in float16 / bfloat16 just to be sure everything works as expected.\nThis might slowdown training & inference so it is recommended to not cast the LayerNorms!\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nquery\ntorch.Tensor\nInput query states to be passed to Flash Attention API\nrequired\n\n\nkey\ntorch.Tensor\nInput key states to be passed to Flash Attention API\nrequired\n\n\nvalue\ntorch.Tensor\nInput value states to be passed to Flash Attention API\nrequired\n\n\ntarget_dtype\ntorch.dtype, optional\nThe dtype to convert the attention tensors to. Conversion can be ignored by not providing the target dtype.\nNone\n\n\npreferred_dtype\ntorch.dtype, optional\nThe preferred dtype to convert the attention tensors to regardless of the target dtype.\nNone" }, { - "objectID": "docs/api/kernels.swiglu.html#functions", - "href": "docs/api/kernels.swiglu.html#functions", - "title": "kernels.swiglu", + "objectID": "docs/api/cli.checks.html", + "href": "docs/api/cli.checks.html", + "title": "cli.checks", "section": "", - "text": "Name\nDescription\n\n\n\n\nswiglu_backward\nSwiGLU backward pass using in-place operations.\n\n\nswiglu_forward\nSwiGLU forward pass. 
Computes SwiGLU activation: x * sigmoid(x) * up, where\n\n\n\n\n\nkernels.swiglu.swiglu_backward(grad_output, gate, up)\nSwiGLU backward pass using in-place operations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ngrad_output\ntorch.Tensor\nGradient of loss with respect to output, shape [batch, seq_len, hidden_dim].\nrequired\n\n\ngate\ntorch.Tensor\nGate tensor from forward pass, shape [batch, seq_len, hidden_dim].\nrequired\n\n\nup\ntorch.Tensor\nUp-projection tensor from forward pass, shape [batch, seq_len, hidden_dim].\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[torch.Tensor, torch.Tensor, torch.Tensor]\nTuple containing: - Forward pass output (h) - Gradient with respect to gate (df) - Gradient with respect to up-projection (de)\n\n\n\n\n\n\n\nkernels.swiglu.swiglu_forward(gate, up)\nSwiGLU forward pass. Computes SwiGLU activation: x * sigmoid(x) * up, where\nx is the gate tensor.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ngate\ntorch.Tensor\nInput gate tensor of shape [batch, seq_len, hidden_dim].\nrequired\n\n\nup\ntorch.Tensor\nUp-projection tensor of shape [batch, seq_len, hidden_dim].\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntorch.Tensor\nOutput tensor of shape [batch, seq_len, hidden_dim]." + "text": "cli.checks\nVarious checks for Axolotl CLI.\n\n\n\n\n\nName\nDescription\n\n\n\n\ncheck_accelerate_default_config\nLogs at warning level if no accelerate config file is found.\n\n\ncheck_user_token\nChecks for HF user info. Check is skipped if HF_HUB_OFFLINE=1.\n\n\n\n\n\ncli.checks.check_accelerate_default_config()\nLogs at warning level if no accelerate config file is found.\n\n\n\ncli.checks.check_user_token()\nChecks for HF user info. 
Check is skipped if HF_HUB_OFFLINE=1.\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nbool\nBoolean indicating successful check (i.e., HF_HUB_OFFLINE=1 or HF user info is retrieved).\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nLocalTokenNotFoundError\nIf HF user info can’t be retrieved." }, { - "objectID": "docs/api/core.trainers.base.html", - "href": "docs/api/core.trainers.base.html", - "title": "core.trainers.base", + "objectID": "docs/api/cli.checks.html#functions", + "href": "docs/api/cli.checks.html#functions", + "title": "cli.checks", "section": "", - "text": "core.trainers.base\nModule for customized trainers\n\n\n\n\n\nName\nDescription\n\n\n\n\nAxolotlTrainer\nExtend the base Trainer for axolotl helpers\n\n\n\n\n\ncore.trainers.base.AxolotlTrainer(\n self,\n *_args,\n bench_data_collator=None,\n eval_data_collator=None,\n dataset_tags=None,\n **kwargs,\n)\nExtend the base Trainer for axolotl helpers\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_eval_dataloader\nGet dataloader for evaluation\n\n\nget_train_dataloader\nGet dataloader for training\n\n\nlog\nLog logs on the various objects watching training, including stored metrics.\n\n\npush_to_hub\nOverwrite the push_to_hub method in order to force-add the tags when pushing the\n\n\n\n\n\ncore.trainers.base.AxolotlTrainer.get_eval_dataloader(eval_dataset=None)\nGet dataloader for evaluation\n\n\n\ncore.trainers.base.AxolotlTrainer.get_train_dataloader()\nGet dataloader for training\n\n\n\ncore.trainers.base.AxolotlTrainer.log(logs, start_time=None)\nLog logs on the various objects watching training, including stored metrics.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nlogs\ndict[str, float]\nThe values to log.\nrequired\n\n\nstart_time\nfloat | None\nThe start of training.\nNone\n\n\n\n\n\n\n\ncore.trainers.base.AxolotlTrainer.push_to_hub(*args, **kwargs)\nOverwrite the push_to_hub method in order to force-add the tags when pushing the\nmodel on the Hub. 
Please refer to ~transformers.Trainer.push_to_hub for more details." + "text": "Name\nDescription\n\n\n\n\ncheck_accelerate_default_config\nLogs at warning level if no accelerate config file is found.\n\n\ncheck_user_token\nChecks for HF user info. Check is skipped if HF_HUB_OFFLINE=1.\n\n\n\n\n\ncli.checks.check_accelerate_default_config()\nLogs at warning level if no accelerate config file is found.\n\n\n\ncli.checks.check_user_token()\nChecks for HF user info. Check is skipped if HF_HUB_OFFLINE=1.\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nbool\nBoolean indicating successful check (i.e., HF_HUB_OFFLINE=1 or HF user info is retrieved).\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nLocalTokenNotFoundError\nIf HF user info can’t be retrieved." }, { - "objectID": "docs/api/core.trainers.base.html#classes", - "href": "docs/api/core.trainers.base.html#classes", - "title": "core.trainers.base", + "objectID": "docs/api/core.chat.format.llama3x.html", + "href": "docs/api/core.chat.format.llama3x.html", + "title": "core.chat.format.llama3x", "section": "", - "text": "Name\nDescription\n\n\n\n\nAxolotlTrainer\nExtend the base Trainer for axolotl helpers\n\n\n\n\n\ncore.trainers.base.AxolotlTrainer(\n self,\n *_args,\n bench_data_collator=None,\n eval_data_collator=None,\n dataset_tags=None,\n **kwargs,\n)\nExtend the base Trainer for axolotl helpers\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_eval_dataloader\nGet dataloader for evaluation\n\n\nget_train_dataloader\nGet dataloader for training\n\n\nlog\nLog logs on the various objects watching training, including stored metrics.\n\n\npush_to_hub\nOverwrite the push_to_hub method in order to force-add the tags when pushing the\n\n\n\n\n\ncore.trainers.base.AxolotlTrainer.get_eval_dataloader(eval_dataset=None)\nGet dataloader for evaluation\n\n\n\ncore.trainers.base.AxolotlTrainer.get_train_dataloader()\nGet dataloader for training\n\n\n\ncore.trainers.base.AxolotlTrainer.log(logs, 
start_time=None)\nLog logs on the various objects watching training, including stored metrics.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nlogs\ndict[str, float]\nThe values to log.\nrequired\n\n\nstart_time\nfloat | None\nThe start of training.\nNone\n\n\n\n\n\n\n\ncore.trainers.base.AxolotlTrainer.push_to_hub(*args, **kwargs)\nOverwrite the push_to_hub method in order to force-add the tags when pushing the\nmodel on the Hub. Please refer to ~transformers.Trainer.push_to_hub for more details." + "text": "core.chat.format.llama3x\ncore.chat.format.llama3x\nLlama 3.x chat formatting functions for MessageContents" }, { - "objectID": "docs/api/monkeypatch.relora.html", - "href": "docs/api/monkeypatch.relora.html", - "title": "monkeypatch.relora", + "objectID": "docs/api/monkeypatch.mistral_attn_hijack_flash.html", + "href": "docs/api/monkeypatch.mistral_attn_hijack_flash.html", + "title": "monkeypatch.mistral_attn_hijack_flash", "section": "", - "text": "monkeypatch.relora\nImplements the ReLoRA training procedure from https://arxiv.org/abs/2307.05695, minus the initial full fine-tune.\n\n\n\n\n\nName\nDescription\n\n\n\n\nReLoRACallback\nCallback to merge LoRA weights into the base model and save full-weight checkpoints\n\n\nReLoRAScheduler\nWraps another scheduler to apply per-lora-restart learning rate warmups.\n\n\n\n\n\nmonkeypatch.relora.ReLoRACallback(self, cfg)\nCallback to merge LoRA weights into the base model and save full-weight checkpoints\n\n\n\nmonkeypatch.relora.ReLoRAScheduler(\n self,\n optimizer,\n inner_schedule,\n relora_steps,\n warmup_steps,\n anneal_steps=1,\n min_lr_scale=0.001,\n)\nWraps another scheduler to apply per-lora-restart learning rate warmups." 
+ "text": "monkeypatch.mistral_attn_hijack_flash\nFlash attention monkey patch for mistral model\n\n\n\n\n\nName\nDescription\n\n\n\n\nMistralDecoderLayer\npatched version of MistralDecoderLayer to pass through the precalculated cu_seqlens\n\n\n\n\n\nmonkeypatch.mistral_attn_hijack_flash.MistralDecoderLayer()\npatched version of MistralDecoderLayer to pass through the precalculated cu_seqlens\n\n\n\n\n\nName\nDescription\n\n\n\n\nforward\n\n\n\n\n\n\nmonkeypatch.mistral_attn_hijack_flash.MistralDecoderLayer.forward(\n hidden_states,\n attention_mask=None,\n position_ids=None,\n past_key_value=None,\n output_attentions=False,\n use_cache=False,\n cu_seqlens=None,\n max_seqlen=None,\n)\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nhidden_states\ntorch.FloatTensor\ninput to the layer of shape (batch, seq_len, embed_dim)\nrequired\n\n\nattention_mask\ntorch.FloatTensor, optional\nattention mask of size (batch, 1, tgt_len, src_len) where padding elements are indicated by very large negative values.\nNone\n\n\noutput_attentions\nbool, optional\nWhether or not to return the attentions tensors of all attention layers. 
See attentions under returned tensors for more detail.\nFalse\n\n\nuse_cache\nbool, optional\nIf set to True, past_key_values key value states are returned and can be used to speed up decoding (see past_key_values).\nFalse\n\n\npast_key_value\nTuple(torch.FloatTensor), optional\ncached past key and value projection states\nNone\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\ngenerate_qkv\n\n\n\n\n\n\nmonkeypatch.mistral_attn_hijack_flash.generate_qkv(\n q,\n k,\n v,\n query_padding_mask=None,\n key_padding_mask=None,\n kvpacked=False,\n qkvpacked=False,\n)\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nq\n\n(batch_size, seqlen_q, nheads, d)\nrequired\n\n\nk\n\n(batch_size, seqlen_k, nheads_k, d)\nrequired\n\n\nv\n\n(batch_size, seqlen_k, nheads_k, d)\nrequired\n\n\nquery_padding_mask\n\n(batch_size, seqlen), bool\nNone\n\n\nkey_padding_mask\n\n(batch_size, seqlen), bool\nNone" }, { - "objectID": "docs/api/monkeypatch.relora.html#classes", - "href": "docs/api/monkeypatch.relora.html#classes", - "title": "monkeypatch.relora", + "objectID": "docs/api/monkeypatch.mistral_attn_hijack_flash.html#classes", + "href": "docs/api/monkeypatch.mistral_attn_hijack_flash.html#classes", + "title": "monkeypatch.mistral_attn_hijack_flash", "section": "", - "text": "Name\nDescription\n\n\n\n\nReLoRACallback\nCallback to merge LoRA weights into the base model and save full-weight checkpoints\n\n\nReLoRAScheduler\nWraps another scheduler to apply per-lora-restart learning rate warmups.\n\n\n\n\n\nmonkeypatch.relora.ReLoRACallback(self, cfg)\nCallback to merge LoRA weights into the base model and save full-weight checkpoints\n\n\n\nmonkeypatch.relora.ReLoRAScheduler(\n self,\n optimizer,\n inner_schedule,\n relora_steps,\n warmup_steps,\n anneal_steps=1,\n min_lr_scale=0.001,\n)\nWraps another scheduler to apply per-lora-restart learning rate warmups." 
+ "text": "Name\nDescription\n\n\n\n\nMistralDecoderLayer\npatched version of MistralDecoderLayer to pass through the precalculated cu_seqlens\n\n\n\n\n\nmonkeypatch.mistral_attn_hijack_flash.MistralDecoderLayer()\npatched version of MistralDecoderLayer to pass through the precalculated cu_seqlens\n\n\n\n\n\nName\nDescription\n\n\n\n\nforward\n\n\n\n\n\n\nmonkeypatch.mistral_attn_hijack_flash.MistralDecoderLayer.forward(\n hidden_states,\n attention_mask=None,\n position_ids=None,\n past_key_value=None,\n output_attentions=False,\n use_cache=False,\n cu_seqlens=None,\n max_seqlen=None,\n)\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nhidden_states\ntorch.FloatTensor\ninput to the layer of shape (batch, seq_len, embed_dim)\nrequired\n\n\nattention_mask\ntorch.FloatTensor, optional\nattention mask of size (batch, 1, tgt_len, src_len) where padding elements are indicated by very large negative values.\nNone\n\n\noutput_attentions\nbool, optional\nWhether or not to return the attentions tensors of all attention layers. 
See attentions under returned tensors for more detail.\nFalse\n\n\nuse_cache\nbool, optional\nIf set to True, past_key_values key value states are returned and can be used to speed up decoding (see past_key_values).\nFalse\n\n\npast_key_value\nTuple(torch.FloatTensor), optional\ncached past key and value projection states\nNone" }, { - "objectID": "docs/api/cli.cloud.modal_.html", - "href": "docs/api/cli.cloud.modal_.html", - "title": "cli.cloud.modal_", + "objectID": "docs/api/monkeypatch.mistral_attn_hijack_flash.html#functions", + "href": "docs/api/monkeypatch.mistral_attn_hijack_flash.html#functions", + "title": "monkeypatch.mistral_attn_hijack_flash", "section": "", - "text": "cli.cloud.modal_\nModal Cloud support from CLI\n\n\n\n\n\nName\nDescription\n\n\n\n\nModalCloud\nModal Cloud implementation.\n\n\n\n\n\ncli.cloud.modal_.ModalCloud(self, config, app=None)\nModal Cloud implementation.\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nrun_cmd\nRun a command inside a folder, with Modal Volume reloading before and commit on success.\n\n\n\n\n\ncli.cloud.modal_.run_cmd(cmd, run_folder, volumes=None)\nRun a command inside a folder, with Modal Volume reloading before and commit on success." 
+ "text": "Name\nDescription\n\n\n\n\ngenerate_qkv\n\n\n\n\n\n\nmonkeypatch.mistral_attn_hijack_flash.generate_qkv(\n q,\n k,\n v,\n query_padding_mask=None,\n key_padding_mask=None,\n kvpacked=False,\n qkvpacked=False,\n)\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nq\n\n(batch_size, seqlen_q, nheads, d)\nrequired\n\n\nk\n\n(batch_size, seqlen_k, nheads_k, d)\nrequired\n\n\nv\n\n(batch_size, seqlen_k, nheads_k, d)\nrequired\n\n\nquery_padding_mask\n\n(batch_size, seqlen), bool\nNone\n\n\nkey_padding_mask\n\n(batch_size, seqlen), bool\nNone" }, { - "objectID": "docs/api/cli.cloud.modal_.html#classes", - "href": "docs/api/cli.cloud.modal_.html#classes", - "title": "cli.cloud.modal_", + "objectID": "docs/api/utils.callbacks.mlflow_.html", + "href": "docs/api/utils.callbacks.mlflow_.html", + "title": "utils.callbacks.mlflow_", "section": "", - "text": "Name\nDescription\n\n\n\n\nModalCloud\nModal Cloud implementation.\n\n\n\n\n\ncli.cloud.modal_.ModalCloud(self, config, app=None)\nModal Cloud implementation." + "text": "utils.callbacks.mlflow_\nMLFlow module for trainer callbacks\n\n\n\n\n\nName\nDescription\n\n\n\n\nSaveAxolotlConfigtoMlflowCallback\nCallback to save axolotl config to mlflow\n\n\n\n\n\nutils.callbacks.mlflow_.SaveAxolotlConfigtoMlflowCallback(axolotl_config_path)\nCallback to save axolotl config to mlflow" }, { - "objectID": "docs/api/cli.cloud.modal_.html#functions", - "href": "docs/api/cli.cloud.modal_.html#functions", - "title": "cli.cloud.modal_", + "objectID": "docs/api/utils.callbacks.mlflow_.html#classes", + "href": "docs/api/utils.callbacks.mlflow_.html#classes", + "title": "utils.callbacks.mlflow_", "section": "", - "text": "Name\nDescription\n\n\n\n\nrun_cmd\nRun a command inside a folder, with Modal Volume reloading before and commit on success.\n\n\n\n\n\ncli.cloud.modal_.run_cmd(cmd, run_folder, volumes=None)\nRun a command inside a folder, with Modal Volume reloading before and commit on success." 
+ "text": "Name\nDescription\n\n\n\n\nSaveAxolotlConfigtoMlflowCallback\nCallback to save axolotl config to mlflow\n\n\n\n\n\nutils.callbacks.mlflow_.SaveAxolotlConfigtoMlflowCallback(axolotl_config_path)\nCallback to save axolotl config to mlflow" }, { - "objectID": "docs/api/core.training_args.html", - "href": "docs/api/core.training_args.html", - "title": "core.training_args", + "objectID": "docs/api/core.datasets.chat.html", + "href": "docs/api/core.datasets.chat.html", + "title": "core.datasets.chat", "section": "", - "text": "core.training_args\nextra axolotl specific training args\n\n\n\n\n\nName\nDescription\n\n\n\n\nAxolotlCPOConfig\nCPO config for CPO training\n\n\nAxolotlKTOConfig\nKTO config for KTO training\n\n\nAxolotlORPOConfig\nORPO config for ORPO training\n\n\nAxolotlPRMConfig\nPRM config for PRM training\n\n\nAxolotlRewardConfig\nReward config for Reward training\n\n\nAxolotlTrainingArguments\nTraining arguments for Causal trainer\n\n\nAxolotlTrainingMixins\nMixin class for the Axolotl training args.\n\n\n\n\n\ncore.training_args.AxolotlCPOConfig(\n self,\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n 
lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_optimizer=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n simpo_gamma=None,\n)\nCPO config for CPO training\n\n\n\ncore.training_args.AxolotlKTOConfig(\n self,\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_optimizer=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nKTO config for KTO training\n\n\n\ncore.training_args.AxolotlORPOConfig(\n self,\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n 
sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_optimizer=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nORPO config for ORPO training\n\n\n\ncore.training_args.AxolotlPRMConfig(\n self,\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n 
orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_optimizer=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nPRM config for PRM training\n\n\n\ncore.training_args.AxolotlRewardConfig(\n self,\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_optimizer=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nReward config for Reward training\n\n\n\ncore.training_args.AxolotlTrainingArguments(\n self,\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n 
multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_optimizer=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nTraining arguments for Causal trainer\nThis code is duplicated due to HF TrainingArguments not setting output_dir with a\ndefault value so it can’t be used as a mixin.\n\n\n\ncore.training_args.AxolotlTrainingMixins(\n self,\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n 
cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_optimizer=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nMixin class for the Axolotl training args." + "text": "core.datasets.chat\nchat dataset module\n\n\n\n\n\nName\nDescription\n\n\n\n\nTokenizedChatDataset\nTokenized chat dataset\n\n\n\n\n\ncore.datasets.chat.TokenizedChatDataset(\n data,\n model_transform,\n *args,\n message_transform=None,\n formatter=None,\n process_count=None,\n keep_in_memory=False,\n **kwargs,\n)\nTokenized chat dataset" }, { - "objectID": "docs/api/core.training_args.html#classes", - "href": "docs/api/core.training_args.html#classes", - "title": "core.training_args", + "objectID": "docs/api/core.datasets.chat.html#classes", + "href": "docs/api/core.datasets.chat.html#classes", + "title": "core.datasets.chat", "section": "", - "text": "Name\nDescription\n\n\n\n\nAxolotlCPOConfig\nCPO config for CPO training\n\n\nAxolotlKTOConfig\nKTO config for KTO training\n\n\nAxolotlORPOConfig\nORPO config for ORPO training\n\n\nAxolotlPRMConfig\nPRM config for PRM training\n\n\nAxolotlRewardConfig\nReward config for Reward training\n\n\nAxolotlTrainingArguments\nTraining arguments for Causal trainer\n\n\nAxolotlTrainingMixins\nMixin class for the Axolotl training args.\n\n\n\n\n\ncore.training_args.AxolotlCPOConfig(\n self,\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n 
eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_optimizer=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n simpo_gamma=None,\n)\nCPO config for CPO training\n\n\n\ncore.training_args.AxolotlKTOConfig(\n self,\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n 
embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_optimizer=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nKTO config for KTO training\n\n\n\ncore.training_args.AxolotlORPOConfig(\n self,\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_optimizer=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nORPO config for ORPO training\n\n\n\ncore.training_args.AxolotlPRMConfig(\n self,\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n 
sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_optimizer=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nPRM config for PRM training\n\n\n\ncore.training_args.AxolotlRewardConfig(\n self,\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n 
embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_optimizer=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nReward config for Reward training\n\n\n\ncore.training_args.AxolotlTrainingArguments(\n self,\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_optimizer=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nTraining arguments for Causal trainer\nThis code is duplicated due to HF TrainingArguments not setting output_dir with a\ndefault value so it 
can’t be used as a mixin.\n\n\n\ncore.training_args.AxolotlTrainingMixins(\n self,\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_optimizer=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nMixin class for the Axolotl training args." 
+ "text": "Name\nDescription\n\n\n\n\nTokenizedChatDataset\nTokenized chat dataset\n\n\n\n\n\ncore.datasets.chat.TokenizedChatDataset(\n data,\n model_transform,\n *args,\n message_transform=None,\n formatter=None,\n process_count=None,\n keep_in_memory=False,\n **kwargs,\n)\nTokenized chat dataset" }, { - "objectID": "docs/api/utils.callbacks.comet_.html", - "href": "docs/api/utils.callbacks.comet_.html", - "title": "utils.callbacks.comet_", + "objectID": "docs/api/core.trainers.mixins.scheduler.html", + "href": "docs/api/core.trainers.mixins.scheduler.html", + "title": "core.trainers.mixins.scheduler", "section": "", - "text": "utils.callbacks.comet_\nComet module for trainer callbacks\n\n\n\n\n\nName\nDescription\n\n\n\n\nSaveAxolotlConfigtoCometCallback\nCallback to save axolotl config to comet\n\n\n\n\n\nutils.callbacks.comet_.SaveAxolotlConfigtoCometCallback(\n self,\n axolotl_config_path,\n)\nCallback to save axolotl config to comet" + "text": "core.trainers.mixins.scheduler\nModule for Axolotl trainer scheduler mixin\n\n\n\n\n\nName\nDescription\n\n\n\n\nSchedulerMixin\nMixin class for scheduler setup in CausalTrainer.\n\n\n\n\n\ncore.trainers.mixins.scheduler.SchedulerMixin()\nMixin class for scheduler setup in CausalTrainer.\n\n\n\n\n\nName\nDescription\n\n\n\n\ncreate_scheduler\nSet up the scheduler. The optimizer of the trainer must have been set up either before this method is called or\n\n\n\n\n\ncore.trainers.mixins.scheduler.SchedulerMixin.create_scheduler(\n num_training_steps,\n optimizer=None,\n)\nSet up the scheduler. 
The optimizer of the trainer must have been set up either before this method is called or\npassed as an argument.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nnum_training_steps\nint\nThe number of training steps to do.\nrequired\n\n\noptimizer\ntorch.optim.Optimizer\nThe training optimizer\nNone" }, { - "objectID": "docs/api/utils.callbacks.comet_.html#classes", - "href": "docs/api/utils.callbacks.comet_.html#classes", - "title": "utils.callbacks.comet_", + "objectID": "docs/api/core.trainers.mixins.scheduler.html#classes", + "href": "docs/api/core.trainers.mixins.scheduler.html#classes", + "title": "core.trainers.mixins.scheduler", "section": "", - "text": "Name\nDescription\n\n\n\n\nSaveAxolotlConfigtoCometCallback\nCallback to save axolotl config to comet\n\n\n\n\n\nutils.callbacks.comet_.SaveAxolotlConfigtoCometCallback(\n self,\n axolotl_config_path,\n)\nCallback to save axolotl config to comet" + "text": "Name\nDescription\n\n\n\n\nSchedulerMixin\nMixin class for scheduler setup in CausalTrainer.\n\n\n\n\n\ncore.trainers.mixins.scheduler.SchedulerMixin()\nMixin class for scheduler setup in CausalTrainer.\n\n\n\n\n\nName\nDescription\n\n\n\n\ncreate_scheduler\nSet up the scheduler. The optimizer of the trainer must have been set up either before this method is called or\n\n\n\n\n\ncore.trainers.mixins.scheduler.SchedulerMixin.create_scheduler(\n num_training_steps,\n optimizer=None,\n)\nSet up the scheduler. 
The optimizer of the trainer must have been set up either before this method is called or\npassed as an argument.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nnum_training_steps\nint\nThe number of training steps to do.\nrequired\n\n\noptimizer\ntorch.optim.Optimizer\nThe training optimizer\nNone" }, { - "objectID": "docs/api/core.chat.format.shared.html", - "href": "docs/api/core.chat.format.shared.html", - "title": "core.chat.format.shared", + "objectID": "docs/api/utils.schedulers.html", + "href": "docs/api/utils.schedulers.html", + "title": "utils.schedulers", "section": "", - "text": "core.chat.format.shared\ncore.chat.format.shared\nshared functions for format transforms" + "text": "utils.schedulers\nModule for custom LRScheduler class\n\n\n\n\n\nName\nDescription\n\n\n\n\nInterpolatingLogScheduler\nA scheduler that interpolates learning rates in a logarithmic fashion\n\n\nRexLR\nReflected Exponential (REX) learning rate scheduler.\n\n\n\n\n\nutils.schedulers.InterpolatingLogScheduler(\n optimizer,\n num_steps,\n min_lr,\n max_lr,\n last_epoch=-1,\n)\nA scheduler that interpolates learning rates in a logarithmic fashion\n\n\n\nutils.schedulers.RexLR(\n optimizer,\n max_lr,\n min_lr,\n total_steps=0,\n num_warmup_steps=0,\n last_step=0,\n)\nReflected Exponential (REX) learning rate scheduler.\n\nOriginal implementation: https://github.com/IvanVassi/REX_LR\nOriginal license: Apache 2.0\nBased on: https://arxiv.org/abs/2107.04197\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\noptimizer\ntorch.optim.Optimizer\nThe optimizer to schedule the learning rate for.\nrequired\n\n\nmax_lr\nfloat\nThe maximum learning rate.\nrequired\n\n\nmin_lr\nfloat\nThe minimum learning rate.\nrequired\n\n\ntotal_steps\nint\nThe total number of training steps.\n0\n\n\nnum_warmup_steps\nint\nThe number of warmup steps.\n0\n\n\nlast_step\nint\nThe index of last 
step.\n0\n\n\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_cosine_schedule_with_min_lr\n\n\n\nget_cosine_schedule_with_quadratic_warmup\nCreate a schedule with a learning rate that decreases following the values of the cosine function between the\n\n\nget_cosine_schedule_with_warmup_decay_constant\nImplementation of Continual Pre-Training of Large Language Models: How to (re)warm your model? (https://arxiv.org/pdf/2308.04014.pdf)\n\n\n\n\n\nutils.schedulers.get_cosine_schedule_with_min_lr(\n optimizer,\n num_warmup_steps,\n num_training_steps,\n min_lr_ratio=0.0,\n)\n\n\n\nlinear warmup from 0 -> max_lr over num_warmup_steps\ncosine learning rate annealing from max_lr -> min_lr over num_training_steps\n\n\n\n\n\nutils.schedulers.get_cosine_schedule_with_quadratic_warmup(\n optimizer,\n num_warmup_steps,\n num_training_steps,\n num_cycles=0.5,\n last_epoch=-1,\n)\nCreate a schedule with a learning rate that decreases following the values of the cosine function between the\ninitial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the\ninitial lr set in the optimizer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\noptimizer\n[~torch.optim.Optimizer]\nThe optimizer for which to schedule the learning rate.\nrequired\n\n\nnum_warmup_steps\nint\nThe number of steps for the warmup phase.\nrequired\n\n\nnum_training_steps\nint\nThe total number of training steps.\nrequired\n\n\nnum_cycles\nfloat, optional, defaults to 0.5\nThe number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine).\n0.5\n\n\nlast_epoch\nint, optional, defaults to -1\nThe index of the last epoch when resuming training.\n-1\n\n\n\n\n\n\ntorch.optim.lr_scheduler.LambdaLR with the appropriate schedule.\n\n\n\n\nutils.schedulers.get_cosine_schedule_with_warmup_decay_constant(\n optimizer,\n num_warmup_steps,\n num_training_steps,\n constant_lr_ratio,\n min_lr_ratio,\n 
num_cycles=0.5,\n last_epoch=-1,\n)\nImplementation of Continual Pre-Training of Large Language Models: How to (re)warm your model? (https://arxiv.org/pdf/2308.04014.pdf)\nCreate a schedule with a learning rate that decreases following the values of the cosine function between the\ninitial lr set in the optimizer to min_lr_ratio until num_training_steps * constant_lr_ratio, after constant_rate returns constant value of min_rate\n, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\noptimizer\n[~torch.optim.Optimizer]\nThe optimizer for which to schedule the learning rate.\nrequired\n\n\nnum_warmup_steps\nint\nThe number of steps for the warmup phase.\nrequired\n\n\nnum_training_steps\nint\nThe total number of training steps.\nrequired\n\n\nconstant_lr_ratio\nfloat\n(float): The ratio of num_training_steps to decrease by cosine function.\nrequired\n\n\nmin_lr_ratio\nfloat\n(float): The ratio of maximum learning rate for cosine function to decay to minimum learning rate. | _required_ | | num_cycles |float, *optional*, defaults to 0.5 | The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine). |0.5| | last_epoch |int, *optional*, defaults to -1 | The index of the last epoch when resuming training. |-1`\n\n\n\n\n\n\n\ntorch.optim.lr_scheduler.LambdaLR with the appropriate schedule." 
}, { - "objectID": "docs/api/monkeypatch.lora_kernels.html", - "href": "docs/api/monkeypatch.lora_kernels.html", - "title": "monkeypatch.lora_kernels", + "objectID": "docs/api/utils.schedulers.html#classes", + "href": "docs/api/utils.schedulers.html#classes", + "title": "utils.schedulers", "section": "", - "text": "monkeypatch.lora_kernels\nModule for patching custom LoRA Triton kernels and torch.autograd functions.\n\n\n\n\n\nName\nDescription\n\n\n\n\nFakeMLP\nplaceholder MLP for triton patching\n\n\n\n\n\nmonkeypatch.lora_kernels.FakeMLP(self, gate_proj, up_proj, down_proj)\nplaceholder MLP for triton patching\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\napply_lora_kernel_patches\nApplies optimized Triton kernel patches to a PEFT model.\n\n\nget_attention_cls_from_config\nGet the appropriate attention class by inspecting the model config.\n\n\noriginal_apply_o\nOriginal implementation of output projection without optimizations.\n\n\noriginal_apply_qkv\nOriginal implementation of QKV projection without optimizations.\n\n\npatch_self_attn_lora\nGiven an axolotl config, this method patches the inferred attention class forward\n\n\n\n\n\nmonkeypatch.lora_kernels.apply_lora_kernel_patches(model, cfg)\nApplies optimized Triton kernel patches to a PEFT model.\nPatches a PEFT model with optimized implementations for MLP and attention\ncomputations. 
The optimizations include custom Triton kernels for activation\nfunctions and specialized autograd functions for LoRA computations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nmodel\nPeftModelForCausalLM\nA PEFT model to be patched with optimized kernels.\nrequired\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\nPeftModelForCausalLM\nPeftModelForCausalLM\nThe patched model with optimized kernels.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTypeError\nIf the provided model is not a PeftModelForCausalLM.\n\n\n\nNotImplementedError\nIf the model type is not supported.\n\n\n\nAssertionError\nIf multiple adapters are active (currently unsupported).\n\n\n\n\n\n\nThe optimizations require LoRA adapters with no dropout and no bias terms. The\nfunction will skip patching if these conditions aren’t met.\n\n\n\n\nmonkeypatch.lora_kernels.get_attention_cls_from_config(cfg)\nGet the appropriate attention class by inspecting the model config.\nUses dynamic import to support any model architecture that follows\nthe standard transformers naming convention.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nType[nn.Module]\nThe appropriate attention class for the model.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nValueError\nIf base_model not specified or attention class cannot be imported\n\n\n\nImportError\nIf the model module or attention class doesn’t exist\n\n\n\n\n\n\n\nmonkeypatch.lora_kernels.original_apply_o(self, hidden_states)\nOriginal implementation of output projection without optimizations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nself\nnn.Module\nThe attention module instance.\nrequired\n\n\nhidden_states\ntorch.Tensor\nInput tensor 
of shape [batch_size, seq_len, hidden_dim]`.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntorch.Tensor\nThe output projection result.\n\n\n\n\n\n\n\nmonkeypatch.lora_kernels.original_apply_qkv(self, hidden_states)\nOriginal implementation of QKV projection without optimizations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nself\nnn.Module\nThe attention module instance.\nrequired\n\n\nhidden_states\ntorch.Tensor\nInput tensor of shape [batch_size, seq_len, hidden_dim].\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[torch.Tensor, torch.Tensor, torch.Tensor]\nA tuple (query_states, key_states, value_states) containing the projected states for query, key, and value.\n\n\n\n\n\n\n\nmonkeypatch.lora_kernels.patch_self_attn_lora(cfg)\nGiven an axolotl config, this method patches the inferred attention class forward\npass with optimized LoRA implementations.\nIt modifies the attention class to use optimized QKV and output projections. The\noriginal implementation is preserved and can be restored if needed.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nAssertionError\nIf the required code blocks are not found in the attention implementation." 
+ "text": "Name\nDescription\n\n\n\n\nInterpolatingLogScheduler\nA scheduler that interpolates learning rates in a logarithmic fashion\n\n\nRexLR\nReflected Exponential (REX) learning rate scheduler.\n\n\n\n\n\nutils.schedulers.InterpolatingLogScheduler(\n optimizer,\n num_steps,\n min_lr,\n max_lr,\n last_epoch=-1,\n)\nA scheduler that interpolates learning rates in a logarithmic fashion\n\n\n\nutils.schedulers.RexLR(\n optimizer,\n max_lr,\n min_lr,\n total_steps=0,\n num_warmup_steps=0,\n last_step=0,\n)\nReflected Exponential (REX) learning rate scheduler.\n\nOriginal implementation: https://github.com/IvanVassi/REX_LR\nOriginal license: Apache 2.0\nBased on: https://arxiv.org/abs/2107.04197\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\noptimizer\ntorch.optim.Optimizer\nThe optimizer to schedule the learning rate for.\nrequired\n\n\nmax_lr\nfloat\nThe maximum learning rate.\nrequired\n\n\nmin_lr\nfloat\nThe minimum learning rate.\nrequired\n\n\ntotal_steps\nint\nThe total number of training steps.\n0\n\n\nnum_warmup_steps\nint\nThe number of warmup steps.\n0\n\n\nlast_step\nint\nThe index of last step.\n0" }, { - "objectID": "docs/api/monkeypatch.lora_kernels.html#classes", - "href": "docs/api/monkeypatch.lora_kernels.html#classes", - "title": "monkeypatch.lora_kernels", + "objectID": "docs/api/utils.schedulers.html#functions", + "href": "docs/api/utils.schedulers.html#functions", + "title": "utils.schedulers", "section": "", - "text": "Name\nDescription\n\n\n\n\nFakeMLP\nplaceholder MLP for triton patching\n\n\n\n\n\nmonkeypatch.lora_kernels.FakeMLP(self, gate_proj, up_proj, down_proj)\nplaceholder MLP for triton patching" + "text": "Name\nDescription\n\n\n\n\nget_cosine_schedule_with_min_lr\n\n\n\nget_cosine_schedule_with_quadratic_warmup\nCreate a schedule with a learning rate that decreases following the values of the cosine function between the\n\n\nget_cosine_schedule_with_warmup_decay_constant\nImplementation of Continual 
Pre-Training of Large Language Models: How to (re)warm your model? (https://arxiv.org/pdf/2308.04014.pdf)\n\n\n\n\n\nutils.schedulers.get_cosine_schedule_with_min_lr(\n optimizer,\n num_warmup_steps,\n num_training_steps,\n min_lr_ratio=0.0,\n)\n\n\n\nlinear warmup from 0 -> max_lr over num_warmup_steps\ncosine learning rate annealing from max_lr -> min_lr over num_training_steps\n\n\n\n\n\nutils.schedulers.get_cosine_schedule_with_quadratic_warmup(\n optimizer,\n num_warmup_steps,\n num_training_steps,\n num_cycles=0.5,\n last_epoch=-1,\n)\nCreate a schedule with a learning rate that decreases following the values of the cosine function between the\ninitial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the\ninitial lr set in the optimizer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\noptimizer\n[~torch.optim.Optimizer]\nThe optimizer for which to schedule the learning rate.\nrequired\n\n\nnum_warmup_steps\nint\nThe number of steps for the warmup phase.\nrequired\n\n\nnum_training_steps\nint\nThe total number of training steps.\nrequired\n\n\nnum_cycles\nfloat, optional, defaults to 0.5\nThe number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine).\n0.5\n\n\nlast_epoch\nint, optional, defaults to -1\nThe index of the last epoch when resuming training.\n-1\n\n\n\n\n\n\ntorch.optim.lr_scheduler.LambdaLR with the appropriate schedule.\n\n\n\n\nutils.schedulers.get_cosine_schedule_with_warmup_decay_constant(\n optimizer,\n num_warmup_steps,\n num_training_steps,\n constant_lr_ratio,\n min_lr_ratio,\n num_cycles=0.5,\n last_epoch=-1,\n)\nImplementation of Continual Pre-Training of Large Language Models: How to (re)warm your model? 
(https://arxiv.org/pdf/2308.04014.pdf)\nCreate a schedule with a learning rate that decreases following the values of the cosine function between the\ninitial lr set in the optimizer to min_lr_ratio until num_training_steps * constant_lr_ratio, after constant_rate returns constant value of min_rate\n, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\noptimizer\n[~torch.optim.Optimizer]\nThe optimizer for which to schedule the learning rate.\nrequired\n\n\nnum_warmup_steps\nint\nThe number of steps for the warmup phase.\nrequired\n\n\nnum_training_steps\nint\nThe total number of training steps.\nrequired\n\n\nconstant_lr_ratio\nfloat\n(float): The ratio of num_training_steps to decrease by cosine function.\nrequired\n\n\nmin_lr_ratio\nfloat\n(float): The ratio of maximum learning rate for cosine function to decay to minimum learning rate. | _required_ | | num_cycles |float, *optional*, defaults to 0.5 | The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine). |0.5| | last_epoch |int, *optional*, defaults to -1 | The index of the last epoch when resuming training. |-1`\n\n\n\n\n\n\n\ntorch.optim.lr_scheduler.LambdaLR with the appropriate schedule." 
}, { - "objectID": "docs/api/monkeypatch.lora_kernels.html#functions", - "href": "docs/api/monkeypatch.lora_kernels.html#functions", - "title": "monkeypatch.lora_kernels", + "objectID": "docs/api/utils.schemas.integrations.html", + "href": "docs/api/utils.schemas.integrations.html", + "title": "utils.schemas.integrations", "section": "", - "text": "Name\nDescription\n\n\n\n\napply_lora_kernel_patches\nApplies optimized Triton kernel patches to a PEFT model.\n\n\nget_attention_cls_from_config\nGet the appropriate attention class by inspecting the model config.\n\n\noriginal_apply_o\nOriginal implementation of output projection without optimizations.\n\n\noriginal_apply_qkv\nOriginal implementation of QKV projection without optimizations.\n\n\npatch_self_attn_lora\nGiven an axolotl config, this method patches the inferred attention class forward\n\n\n\n\n\nmonkeypatch.lora_kernels.apply_lora_kernel_patches(model, cfg)\nApplies optimized Triton kernel patches to a PEFT model.\nPatches a PEFT model with optimized implementations for MLP and attention\ncomputations. The optimizations include custom Triton kernels for activation\nfunctions and specialized autograd functions for LoRA computations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nmodel\nPeftModelForCausalLM\nA PEFT model to be patched with optimized kernels.\nrequired\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\nPeftModelForCausalLM\nPeftModelForCausalLM\nThe patched model with optimized kernels.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTypeError\nIf the provided model is not a PeftModelForCausalLM.\n\n\n\nNotImplementedError\nIf the model type is not supported.\n\n\n\nAssertionError\nIf multiple adapters are active (currently unsupported).\n\n\n\n\n\n\nThe optimizations require LoRA adapters with no dropout and no bias terms. 
The\nfunction will skip patching if these conditions aren’t met.\n\n\n\n\nmonkeypatch.lora_kernels.get_attention_cls_from_config(cfg)\nGet the appropriate attention class by inspecting the model config.\nUses dynamic import to support any model architecture that follows\nthe standard transformers naming convention.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nType[nn.Module]\nThe appropriate attention class for the model.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nValueError\nIf base_model not specified or attention class cannot be imported\n\n\n\nImportError\nIf the model module or attention class doesn’t exist\n\n\n\n\n\n\n\nmonkeypatch.lora_kernels.original_apply_o(self, hidden_states)\nOriginal implementation of output projection without optimizations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nself\nnn.Module\nThe attention module instance.\nrequired\n\n\nhidden_states\ntorch.Tensor\nInput tensor of shape [batch_size, seq_len, hidden_dim]`.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntorch.Tensor\nThe output projection result.\n\n\n\n\n\n\n\nmonkeypatch.lora_kernels.original_apply_qkv(self, hidden_states)\nOriginal implementation of QKV projection without optimizations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nself\nnn.Module\nThe attention module instance.\nrequired\n\n\nhidden_states\ntorch.Tensor\nInput tensor of shape [batch_size, seq_len, hidden_dim].\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[torch.Tensor, torch.Tensor, torch.Tensor]\nA tuple (query_states, key_states, value_states) containing the projected states for query, key, and value.\n\n\n\n\n\n\n\nmonkeypatch.lora_kernels.patch_self_attn_lora(cfg)\nGiven an axolotl config, this method patches the inferred attention 
class forward\npass with optimized LoRA implementations.\nIt modifies the attention class to use optimized QKV and output projections. The\noriginal implementation is preserved and can be restored if needed.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nAssertionError\nIf the required code blocks are not found in the attention implementation." + "text": "utils.schemas.integrations\nPydantic models for Axolotl integrations\n\n\n\n\n\nName\nDescription\n\n\n\n\nCometConfig\nComet configuration subset\n\n\nGradioConfig\nGradio configuration subset\n\n\nLISAConfig\nLISA configuration subset\n\n\nMLFlowConfig\nMLFlow configuration subset\n\n\nRayConfig\nRay launcher configuration subset\n\n\nWandbConfig\nWandb configuration subset\n\n\n\n\n\nutils.schemas.integrations.CometConfig()\nComet configuration subset\n\n\n\nutils.schemas.integrations.GradioConfig()\nGradio configuration subset\n\n\n\nutils.schemas.integrations.LISAConfig()\nLISA configuration subset\n\n\n\nutils.schemas.integrations.MLFlowConfig()\nMLFlow configuration subset\n\n\n\nutils.schemas.integrations.RayConfig()\nRay launcher configuration subset\n\n\n\nutils.schemas.integrations.WandbConfig()\nWandb configuration subset" }, { - "objectID": "docs/api/cli.merge_lora.html", - "href": "docs/api/cli.merge_lora.html", - "title": "cli.merge_lora", + "objectID": "docs/api/utils.schemas.integrations.html#classes", + "href": "docs/api/utils.schemas.integrations.html#classes", + "title": "utils.schemas.integrations", "section": "", - "text": "cli.merge_lora\nCLI to merge a trained LoRA into a base model.\n\n\n\n\n\nName\nDescription\n\n\n\n\ndo_cli\nParses axolotl config, CLI args, and calls do_merge_lora. 
Note that various\n\n\ndo_merge_lora\nCalls transformers’ merge_and_unload on the model given in the axolotl config\n\n\n\n\n\ncli.merge_lora.do_cli(config=Path('examples/'), **kwargs)\nParses axolotl config, CLI args, and calls do_merge_lora. Note that various\nconfig values will be overwritten to allow the LoRA merge logic to work as expected\n(load_in_8bit=False, load_in4bit=False, flash_attention=False, etc.).\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nconfig\nUnion[Path, str]\nPath to axolotl config YAML file.\nPath('examples/')\n\n\nkwargs\n\nAdditional keyword arguments to override config file values.\n{}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nValueError\nIf target directory for LoRA merged model does not exist.\n\n\n\n\n\n\n\ncli.merge_lora.do_merge_lora(cfg)\nCalls transformers’ merge_and_unload on the model given in the axolotl config\nalong with the LoRA adapters to combine them into a single base model.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired" + "text": "Name\nDescription\n\n\n\n\nCometConfig\nComet configuration subset\n\n\nGradioConfig\nGradio configuration subset\n\n\nLISAConfig\nLISA configuration subset\n\n\nMLFlowConfig\nMLFlow configuration subset\n\n\nRayConfig\nRay launcher configuration subset\n\n\nWandbConfig\nWandb configuration subset\n\n\n\n\n\nutils.schemas.integrations.CometConfig()\nComet configuration subset\n\n\n\nutils.schemas.integrations.GradioConfig()\nGradio configuration subset\n\n\n\nutils.schemas.integrations.LISAConfig()\nLISA configuration subset\n\n\n\nutils.schemas.integrations.MLFlowConfig()\nMLFlow configuration subset\n\n\n\nutils.schemas.integrations.RayConfig()\nRay launcher configuration subset\n\n\n\nutils.schemas.integrations.WandbConfig()\nWandb configuration subset" }, { - "objectID": "docs/api/cli.merge_lora.html#functions", - "href": 
"docs/api/cli.merge_lora.html#functions", - "title": "cli.merge_lora", + "objectID": "docs/api/utils.tokenization.html", + "href": "docs/api/utils.tokenization.html", + "title": "utils.tokenization", "section": "", - "text": "Name\nDescription\n\n\n\n\ndo_cli\nParses axolotl config, CLI args, and calls do_merge_lora. Note that various\n\n\ndo_merge_lora\nCalls transformers’ merge_and_unload on the model given in the axolotl config\n\n\n\n\n\ncli.merge_lora.do_cli(config=Path('examples/'), **kwargs)\nParses axolotl config, CLI args, and calls do_merge_lora. Note that various\nconfig values will be overwritten to allow the LoRA merge logic to work as expected\n(load_in_8bit=False, load_in4bit=False, flash_attention=False, etc.).\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nconfig\nUnion[Path, str]\nPath to axolotl config YAML file.\nPath('examples/')\n\n\nkwargs\n\nAdditional keyword arguments to override config file values.\n{}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nValueError\nIf target directory for LoRA merged model does not exist.\n\n\n\n\n\n\n\ncli.merge_lora.do_merge_lora(cfg)\nCalls transformers’ merge_and_unload on the model given in the axolotl config\nalong with the LoRA adapters to combine them into a single base model.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired" + "text": "utils.tokenization\nModule for tokenization utilities\n\n\n\n\n\nName\nDescription\n\n\n\n\ncolor_token_for_rl_debug\nHelper function to color tokens based on their type.\n\n\nprocess_tokens_for_rl_debug\nHelper function to process and color tokens.\n\n\n\n\n\nutils.tokenization.color_token_for_rl_debug(\n decoded_token,\n encoded_token,\n color,\n text_only,\n)\nHelper function to color tokens based on their type.\n\n\n\nutils.tokenization.process_tokens_for_rl_debug(\n tokens,\n color,\n tokenizer,\n text_only,\n)\nHelper function to process 
and color tokens." }, { - "objectID": "docs/api/utils.trainer.html", - "href": "docs/api/utils.trainer.html", - "title": "utils.trainer", + "objectID": "docs/api/utils.tokenization.html#functions", + "href": "docs/api/utils.tokenization.html#functions", + "title": "utils.tokenization", "section": "", - "text": "utils.trainer\nModule containing the Trainer class and related functions\n\n\n\n\n\nName\nDescription\n\n\n\n\nadd_pose_position_ids\nuse the PoSE technique to extend the context length by randomly skipping\n\n\nadd_position_ids\nHandle both single-example and batched data.\n\n\ndrop_long_seq\nDrop samples whose sequence length is either too long (> sequence_len)\n\n\nsetup_trainer\nHelper method for instantiating and building a (causal or RLHF) trainer.\n\n\n\n\n\nutils.trainer.add_pose_position_ids(\n sample,\n max_context_len=32768,\n split_on_token_ids=None,\n chunks=2,\n)\nuse the PoSE technique to extend the context length by randomly skipping\npositions in the context. We only want to skip right before tokens in\nthe split_on_token_ids list. We should attempt to randomly distribute\nthe skips, but we don’t need the final position_ids to be the full\ncontext_len. 
There may be multiple turns in the context, so we want to\nmake sure we take into account the maximum possible number of skips\nremaining in each sample.\n\n\n\nutils.trainer.add_position_ids(sample)\nHandle both single-example and batched data.\n- single example: sample[‘input_ids’] is a list[int]\n- batched data: sample[‘input_ids’] is a list[list[int]]\n\n\n\nutils.trainer.drop_long_seq(sample, sequence_len=2048, min_sequence_len=2)\nDrop samples whose sequence length is either too long (> sequence_len)\nor too short (< min_sequence_len).\nWorks for both single-example (list[int]) or batched (list[list[int]]).\n\n\n\nutils.trainer.setup_trainer(\n cfg,\n train_dataset,\n eval_dataset,\n model,\n tokenizer,\n processor,\n total_num_steps,\n model_ref=None,\n peft_config=None,\n)\nHelper method for instantiating and building a (causal or RLHF) trainer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\n\nAxolotl config object containing training parameters.\nrequired\n\n\ntrain_dataset\n\nDataset to use for training.\nrequired\n\n\neval_dataset\n\nDataset to use for evaluation.\nrequired\n\n\nmodel\n\nThe model to train.\nrequired\n\n\ntokenizer\n\nTokenizer for processing text input.\nrequired\n\n\nprocessor\n\nProcessor for data preparation.\nrequired\n\n\ntotal_num_steps\n\nThe total number of training steps.\nrequired\n\n\nmodel_ref\n\nOptional reference model for RLHF training. Default is None.\nNone\n\n\npeft_config\n\nOptional PEFT (Parameter-Efficient Fine-Tuning) configuration. Default is None.\nNone\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\n\nA trainer instance (either HFRLTrainer or HFCausalTrainer) configured based on the provided parameters." 
+ "text": "Name\nDescription\n\n\n\n\ncolor_token_for_rl_debug\nHelper function to color tokens based on their type.\n\n\nprocess_tokens_for_rl_debug\nHelper function to process and color tokens.\n\n\n\n\n\nutils.tokenization.color_token_for_rl_debug(\n decoded_token,\n encoded_token,\n color,\n text_only,\n)\nHelper function to color tokens based on their type.\n\n\n\nutils.tokenization.process_tokens_for_rl_debug(\n tokens,\n color,\n tokenizer,\n text_only,\n)\nHelper function to process and color tokens." }, { - "objectID": "docs/api/utils.trainer.html#functions", - "href": "docs/api/utils.trainer.html#functions", - "title": "utils.trainer", + "objectID": "docs/api/utils.distributed.html", + "href": "docs/api/utils.distributed.html", + "title": "utils.distributed", "section": "", - "text": "Name\nDescription\n\n\n\n\nadd_pose_position_ids\nuse the PoSE technique to extend the context length by randomly skipping\n\n\nadd_position_ids\nHandle both single-example and batched data.\n\n\ndrop_long_seq\nDrop samples whose sequence length is either too long (> sequence_len)\n\n\nsetup_trainer\nHelper method for instantiating and building a (causal or RLHF) trainer.\n\n\n\n\n\nutils.trainer.add_pose_position_ids(\n sample,\n max_context_len=32768,\n split_on_token_ids=None,\n chunks=2,\n)\nuse the PoSE technique to extend the context length by randomly skipping\npositions in the context. We only want to skip right before tokens in\nthe split_on_token_ids list. We should attempt to randomly distribute\nthe skips, but we don’t need the final position_ids to be the full\ncontext_len. 
There may be multiple turns in the context, so we want to\nmake sure we take into account the maximum possible number of skips\nremaining in each sample.\n\n\n\nutils.trainer.add_position_ids(sample)\nHandle both single-example and batched data.\n- single example: sample[‘input_ids’] is a list[int]\n- batched data: sample[‘input_ids’] is a list[list[int]]\n\n\n\nutils.trainer.drop_long_seq(sample, sequence_len=2048, min_sequence_len=2)\nDrop samples whose sequence length is either too long (> sequence_len)\nor too short (< min_sequence_len).\nWorks for both single-example (list[int]) or batched (list[list[int]]).\n\n\n\nutils.trainer.setup_trainer(\n cfg,\n train_dataset,\n eval_dataset,\n model,\n tokenizer,\n processor,\n total_num_steps,\n model_ref=None,\n peft_config=None,\n)\nHelper method for instantiating and building a (causal or RLHF) trainer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\n\nAxolotl config object containing training parameters.\nrequired\n\n\ntrain_dataset\n\nDataset to use for training.\nrequired\n\n\neval_dataset\n\nDataset to use for evaluation.\nrequired\n\n\nmodel\n\nThe model to train.\nrequired\n\n\ntokenizer\n\nTokenizer for processing text input.\nrequired\n\n\nprocessor\n\nProcessor for data preparation.\nrequired\n\n\ntotal_num_steps\n\nThe total number of training steps.\nrequired\n\n\nmodel_ref\n\nOptional reference model for RLHF training. Default is None.\nNone\n\n\npeft_config\n\nOptional PEFT (Parameter-Efficient Fine-Tuning) configuration. Default is None.\nNone\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\n\nA trainer instance (either HFRLTrainer or HFCausalTrainer) configured based on the provided parameters." + "text": "utils.distributed\nutility helpers for distributed checks\n\n\n\n\n\nName\nDescription\n\n\n\n\nbarrier\nActs as a barrier to wait for all processes. 
This ensures that all processes\n\n\ncleanup_distributed\nDestroy process group if torch distributed is initialized. Called in training early\n\n\ncompute_and_broadcast\nCompute a value using the function ‘fn’ only on the specified rank (default is 0).\n\n\ngather_from_all_ranks\nRun a callable ‘fn’ on all ranks and gather the results on the specified rank.\n\n\ngather_scalar_from_all_ranks\nRun a callable ‘fn’ on all ranks and gather the results on the specified rank.\n\n\nis_distributed\nCheck if distributed training is initialized.\n\n\nis_main_process\nCheck if the current process is the main process. If not in distributed mode,\n\n\nreduce_and_broadcast\nRun a callable ‘fn1’ on all ranks, gather the results, reduce them using ‘fn2’,\n\n\nzero_first\nruns the wrapped context so that rank 0 runs first before other ranks\n\n\n\n\n\nutils.distributed.barrier()\nActs as a barrier to wait for all processes. This ensures that all processes\nreach the barrier before proceeding further.\n\n\n\nutils.distributed.cleanup_distributed()\nDestroy process group if torch distributed is initialized. Called in training early\ntermination or when training successfully completes.\n\n\n\nutils.distributed.compute_and_broadcast(fn)\nCompute a value using the function ‘fn’ only on the specified rank (default is 0).\nThe value is then broadcasted to all other ranks.\nArgs:\n- fn (callable): A function that computes the value. This should not have any side effects.\n- rank (int, optional): The rank that computes the value. Default is 0.\nReturns:\n- The computed value (int or float).\n\n\n\nutils.distributed.gather_from_all_ranks(fn, world_size=1)\nRun a callable ‘fn’ on all ranks and gather the results on the specified rank.\nArgs:\n- fn (callable): A function that computes the value. This should not have any side effects.\n- rank (int, optional): The rank that gathers the values. 
Default is 0.\n- world_size (int, optional): Total number of processes in the current distributed setup.\nReturns:\n- A list of computed values from all ranks if on the gathering rank, otherwise None.\n\n\n\nutils.distributed.gather_scalar_from_all_ranks(fn, world_size=1)\nRun a callable ‘fn’ on all ranks and gather the results on the specified rank.\nArgs:\n- fn (callable): A function that computes the value. This should not have any side effects.\n- rank (int, optional): The rank that gathers the values. Default is 0.\n- world_size (int, optional): Total number of processes in the current distributed setup.\nReturns:\n- A list of computed values from all ranks if on the gathering rank, otherwise None.\n\n\n\nutils.distributed.is_distributed()\nCheck if distributed training is initialized.\n\n\n\nutils.distributed.is_main_process(use_environ=False)\nCheck if the current process is the main process. If not in distributed mode,\nalways return True.\nArgs:\n- use_environ (bool, optional): Use environment variable to determine main process.\nReturns:\n- bool: True if the current process is the main process, False otherwise.\n\n\n\nutils.distributed.reduce_and_broadcast(fn1, fn2)\nRun a callable ‘fn1’ on all ranks, gather the results, reduce them using ‘fn2’,\nand then broadcast the reduced result to all ranks.\nArgs:\n- fn1 (callable): A function that computes the value on each rank.\n- fn2 (callable): A reduction function that takes a list of values and returns a single value.\n- world_size (int, optional): Total number of processes in the current distributed setup.\nReturns:\n- The reduced and broadcasted value.\n\n\n\nutils.distributed.zero_first(is_main)\nruns the wrapped context so that rank 0 runs first before other ranks" }, { - "objectID": "docs/api/utils.dict.html", - "href": "docs/api/utils.dict.html", - "title": "utils.dict", + "objectID": "docs/api/utils.distributed.html#functions", + "href": "docs/api/utils.distributed.html#functions", + "title": 
"utils.distributed", "section": "", - "text": "utils.dict\nModule containing the DictDefault class\n\n\n\n\n\nName\nDescription\n\n\n\n\nDictDefault\nA Dict that returns None instead of returning empty Dict for missing keys.\n\n\n\n\n\nutils.dict.DictDefault()\nA Dict that returns None instead of returning empty Dict for missing keys." + "text": "Name\nDescription\n\n\n\n\nbarrier\nActs as a barrier to wait for all processes. This ensures that all processes\n\n\ncleanup_distributed\nDestroy process group if torch distributed is initialized. Called in training early\n\n\ncompute_and_broadcast\nCompute a value using the function ‘fn’ only on the specified rank (default is 0).\n\n\ngather_from_all_ranks\nRun a callable ‘fn’ on all ranks and gather the results on the specified rank.\n\n\ngather_scalar_from_all_ranks\nRun a callable ‘fn’ on all ranks and gather the results on the specified rank.\n\n\nis_distributed\nCheck if distributed training is initialized.\n\n\nis_main_process\nCheck if the current process is the main process. If not in distributed mode,\n\n\nreduce_and_broadcast\nRun a callable ‘fn1’ on all ranks, gather the results, reduce them using ‘fn2’,\n\n\nzero_first\nruns the wrapped context so that rank 0 runs first before other ranks\n\n\n\n\n\nutils.distributed.barrier()\nActs as a barrier to wait for all processes. This ensures that all processes\nreach the barrier before proceeding further.\n\n\n\nutils.distributed.cleanup_distributed()\nDestroy process group if torch distributed is initialized. Called in training early\ntermination or when training successfully completes.\n\n\n\nutils.distributed.compute_and_broadcast(fn)\nCompute a value using the function ‘fn’ only on the specified rank (default is 0).\nThe value is then broadcasted to all other ranks.\nArgs:\n- fn (callable): A function that computes the value. This should not have any side effects.\n- rank (int, optional): The rank that computes the value. 
Default is 0.\nReturns:\n- The computed value (int or float).\n\n\n\nutils.distributed.gather_from_all_ranks(fn, world_size=1)\nRun a callable ‘fn’ on all ranks and gather the results on the specified rank.\nArgs:\n- fn (callable): A function that computes the value. This should not have any side effects.\n- rank (int, optional): The rank that gathers the values. Default is 0.\n- world_size (int, optional): Total number of processes in the current distributed setup.\nReturns:\n- A list of computed values from all ranks if on the gathering rank, otherwise None.\n\n\n\nutils.distributed.gather_scalar_from_all_ranks(fn, world_size=1)\nRun a callable ‘fn’ on all ranks and gather the results on the specified rank.\nArgs:\n- fn (callable): A function that computes the value. This should not have any side effects.\n- rank (int, optional): The rank that gathers the values. Default is 0.\n- world_size (int, optional): Total number of processes in the current distributed setup.\nReturns:\n- A list of computed values from all ranks if on the gathering rank, otherwise None.\n\n\n\nutils.distributed.is_distributed()\nCheck if distributed training is initialized.\n\n\n\nutils.distributed.is_main_process(use_environ=False)\nCheck if the current process is the main process. 
If not in distributed mode,\nalways return True.\nArgs:\n- use_environ (bool, optional): Use environment variable to determine main process.\nReturns:\n- bool: True if the current process is the main process, False otherwise.\n\n\n\nutils.distributed.reduce_and_broadcast(fn1, fn2)\nRun a callable ‘fn1’ on all ranks, gather the results, reduce them using ‘fn2’,\nand then broadcast the reduced result to all ranks.\nArgs:\n- fn1 (callable): A function that computes the value on each rank.\n- fn2 (callable): A reduction function that takes a list of values and returns a single value.\n- world_size (int, optional): Total number of processes in the current distributed setup.\nReturns:\n- The reduced and broadcasted value.\n\n\n\nutils.distributed.zero_first(is_main)\nruns the wrapped context so that rank 0 runs first before other ranks" }, { - "objectID": "docs/api/utils.dict.html#classes", - "href": "docs/api/utils.dict.html#classes", - "title": "utils.dict", + "objectID": "docs/api/prompt_strategies.bradley_terry.llama3.html", + "href": "docs/api/prompt_strategies.bradley_terry.llama3.html", + "title": "prompt_strategies.bradley_terry.llama3", "section": "", - "text": "Name\nDescription\n\n\n\n\nDictDefault\nA Dict that returns None instead of returning empty Dict for missing keys.\n\n\n\n\n\nutils.dict.DictDefault()\nA Dict that returns None instead of returning empty Dict for missing keys." + "text": "prompt_strategies.bradley_terry.llama3\nchatml transforms for datasets with system, input, chosen, rejected to match llama3 chat template\n\n\n\n\n\nName\nDescription\n\n\n\n\nicr\nchatml transforms for datasets with system, input, chosen, rejected\n\n\n\n\n\nprompt_strategies.bradley_terry.llama3.icr(cfg, **kwargs)\nchatml transforms for datasets with system, input, chosen, rejected\nex. 
https://huggingface.co/datasets/argilla/distilabel-intel-orca-dpo-pairs" }, { - "objectID": "docs/api/kernels.quantize.html", - "href": "docs/api/kernels.quantize.html", - "title": "kernels.quantize", + "objectID": "docs/api/prompt_strategies.bradley_terry.llama3.html#functions", + "href": "docs/api/prompt_strategies.bradley_terry.llama3.html#functions", + "title": "prompt_strategies.bradley_terry.llama3", "section": "", - "text": "kernels.quantize\nDequantization utilities for bitsandbytes integration.\n\n\n\n\n\nName\nDescription\n\n\n\n\ndequantize\nFast NF4 dequantization using bitsandbytes CUDA kernels.\n\n\n\n\n\nkernels.quantize.dequantize(W, quant_state=None, out=None)\nFast NF4 dequantization using bitsandbytes CUDA kernels.\nPerforms efficient dequantization of weights from NF4 format using bitsandbytes’\noptimized CUDA implementations. Supports both legacy list and new QuantState\nformats.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nW\ntorch.Tensor\nQuantized weight tensor to dequantize\nrequired\n\n\nquant_state\nQuantState | list | None\nQuantization state containing metadata needed for dequantization. Can be either a QuantState object or legacy list format. If None, returns W unchanged.\nNone\n\n\nout\ntorch.Tensor | None\nOptional output tensor for storing dequantized results. Must match expected shape and dtype if provided.\nNone\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntorch.Tensor\nDequantized tensor in the specified dtype (fp16 or bf16). Will be transposed if\n\n\n\ntorch.Tensor\ninput W was transposed.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nAssertionError\nIf provided output tensor doesn’t match expected shape / dtype.\n\n\n\n\n\n\nUses CUDA streams for better performance when available in newer bitsandbytes\nversions (>0.43.3)." 
+ "text": "Name\nDescription\n\n\n\n\nicr\nchatml transforms for datasets with system, input, chosen, rejected\n\n\n\n\n\nprompt_strategies.bradley_terry.llama3.icr(cfg, **kwargs)\nchatml transforms for datasets with system, input, chosen, rejected\nex. https://huggingface.co/datasets/argilla/distilabel-intel-orca-dpo-pairs" }, { - "objectID": "docs/api/kernels.quantize.html#functions", - "href": "docs/api/kernels.quantize.html#functions", - "title": "kernels.quantize", + "objectID": "docs/api/common.const.html", + "href": "docs/api/common.const.html", + "title": "common.const", "section": "", - "text": "Name\nDescription\n\n\n\n\ndequantize\nFast NF4 dequantization using bitsandbytes CUDA kernels.\n\n\n\n\n\nkernels.quantize.dequantize(W, quant_state=None, out=None)\nFast NF4 dequantization using bitsandbytes CUDA kernels.\nPerforms efficient dequantization of weights from NF4 format using bitsandbytes’\noptimized CUDA implementations. Supports both legacy list and new QuantState\nformats.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nW\ntorch.Tensor\nQuantized weight tensor to dequantize\nrequired\n\n\nquant_state\nQuantState | list | None\nQuantization state containing metadata needed for dequantization. Can be either a QuantState object or legacy list format. If None, returns W unchanged.\nNone\n\n\nout\ntorch.Tensor | None\nOptional output tensor for storing dequantized results. Must match expected shape and dtype if provided.\nNone\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntorch.Tensor\nDequantized tensor in the specified dtype (fp16 or bf16). Will be transposed if\n\n\n\ntorch.Tensor\ninput W was transposed.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nAssertionError\nIf provided output tensor doesn’t match expected shape / dtype.\n\n\n\n\n\n\nUses CUDA streams for better performance when available in newer bitsandbytes\nversions (>0.43.3)." 
+ "text": "common.const\ncommon.const\nVarious shared constants" }, { - "objectID": "docs/api/core.trainers.utils.html", - "href": "docs/api/core.trainers.utils.html", - "title": "core.trainers.utils", + "objectID": "docs/api/loaders.processor.html", + "href": "docs/api/loaders.processor.html", + "title": "loaders.processor", "section": "", - "text": "core.trainers.utils\ncore.trainers.utils\nUtils for Axolotl trainers" + "text": "loaders.processor\nloaders.processor\nProcessor loading functionality for multi-modal models" }, { - "objectID": "docs/api/monkeypatch.data.batch_dataset_fetcher.html", - "href": "docs/api/monkeypatch.data.batch_dataset_fetcher.html", - "title": "monkeypatch.data.batch_dataset_fetcher", + "objectID": "docs/api/evaluate.html", + "href": "docs/api/evaluate.html", + "title": "evaluate", "section": "", - "text": "monkeypatch.data.batch_dataset_fetcher\nmonkeypatch.data.batch_dataset_fetcher\nmonkey patches for the dataset fetcher to handle batches of packed indexes" + "text": "evaluate\nModule for evaluating models.\n\n\n\n\n\nName\nDescription\n\n\n\n\nevaluate\nEvaluate a model on training and validation datasets.\n\n\nevaluate_dataset\nHelper function to evaluate a single dataset.\n\n\n\n\n\nevaluate.evaluate(cfg, dataset_meta)\nEvaluate a model on training and validation datasets.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ndataset_meta\nTrainDatasetMeta\nDataset metadata containing training and evaluation datasets.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nDict[str, float]\nDictionary mapping metric names to their values.\n\n\n\n\n\n\n\nevaluate.evaluate_dataset(trainer, dataset, dataset_type, flash_optimum=False)\nHelper function to evaluate a single dataset.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntrainer\nTrainer\nThe trainer instance.\nrequired\n\n\ndataset\nDataset\nDataset 
to evaluate.\nrequired\n\n\ndataset_type\nstr\nType of dataset (‘train’ or ‘eval’).\nrequired\n\n\nflash_optimum\nbool\nWhether to use flash optimum.\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nOptional[Dict[str, float]]\nDictionary of metrics or None if dataset is None." }, { - "objectID": "docs/api/utils.ctx_managers.sequence_parallel.html", - "href": "docs/api/utils.ctx_managers.sequence_parallel.html", - "title": "utils.ctx_managers.sequence_parallel", + "objectID": "docs/api/evaluate.html#functions", + "href": "docs/api/evaluate.html#functions", + "title": "evaluate", "section": "", - "text": "utils.ctx_managers.sequence_parallel\nModule for Axolotl trainer sequence parallelism manager and utilities\n\n\n\n\n\nName\nDescription\n\n\n\n\nAllGatherWithGrad\nCustom autograd function for all-gather to preserve gradients.\n\n\nSequenceParallelContextManager\nContext manager for sequence parallelism operations.\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.AllGatherWithGrad()\nCustom autograd function for all-gather to preserve gradients.\n\n\n\n\n\nName\nDescription\n\n\n\n\nbackward\nBackward pass for all-gather operation.\n\n\nforward\nForward pass of all-gather of data with sequence dimension.\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.AllGatherWithGrad.backward(\n ctx,\n grad_output,\n)\nBackward pass for all-gather operation.\nExtracts the gradient slice corresponding to this rank’s original input\nfrom the full gradient tensor.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nctx\ntorch.autograd.function.FunctionCtx\ntorch.autograd function context.\nrequired\n\n\ngrad_output\ntorch.Tensor\nGradient from subsequent layers with respect to the concatenated output tensor.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[torch.Tensor, None]\nTuple containing the gradient slice for this rank’s input tensor and None for the process group parameter which doesn’t require 
gradients.\n\n\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.AllGatherWithGrad.forward(\n ctx,\n input_tensor,\n group,\n)\nForward pass of all-gather of data with sequence dimension.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nctx\ntorch.autograd.function.FunctionCtx\ntorch.autograd function context.\nrequired\n\n\ninput_tensor\ntorch.Tensor\nTensor from model output with sequence dimension.\nrequired\n\n\ngroup\ndist.ProcessGroup\ntorch.distributed process group.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntorch.Tensor\nTensor from gathering the input_tensor from across the process group and concatenating along the sequence dimension.\n\n\n\n\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.SequenceParallelContextManager(\n self,\n models,\n sequence_parallel_degree,\n gradient_accumulation_steps,\n ring_attn_func,\n heads_k_stride,\n)\nContext manager for sequence parallelism operations.\nThis class provides a context that will automatically apply sequence parallelism\nduring model forward passes using a pre-forward hook, and gather outputs from\nacross the sequence parallelism group using a post-forward hook.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nmodels\nlist[nn.Module]\nList of models to apply sequence parallelism to pre- and post- forward hooks.\nrequired\n\n\nsequence_parallel_degree\nint\nNumber of processes to split sequences over.\nrequired\n\n\ngradient_accumulation_steps\nint\nNumber of steps to accumulate gradients over.\nrequired\n\n\nring_attn_func\nRingAttnFunc\nWhich ring attention function to use. Currently unused.\nrequired\n\n\nheads_k_stride\nint | None\nSequence parallelism K head stride size. 
Passed through to varlen_llama3 ring_flash_attn implementation.\nrequired\n\n\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\napply_sequence_parallelism\nApply sequence parallelism slicing to a batch.\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.apply_sequence_parallelism(\n batch,\n local_rank,\n local_world_size,\n gradient_accumulation_steps,\n ring_attn_func,\n)\nApply sequence parallelism slicing to a batch.\nSpecial handling is implemented for integer logits_to_keep, which indicates\nto only keep the last N tokens in the sequence during generation.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nbatch\ndict[str, torch.Tensor]\nBatch dictionary (e.g., input_ids, attention_mask, etc.).\nrequired\n\n\nlocal_rank\nint\nLocal rank in the sequence parallel group.\nrequired\n\n\nlocal_world_size\nint\nWorld size of the sequence parallel group.\nrequired\n\n\ngradient_accumulation_steps\nint\nNumber of steps to accumulate gradients over.\nrequired\n\n\nring_attn_func\nRingAttnFunc\nWhich ring attention function to use. Currently unused, but related to above TODO.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[dict[str, torch.Tensor], int, int]\ntuple of: - Batch dictionary with sliced tensors. - The original sequence length before padding. - The number of padding tokens added." 
+ "text": "Name\nDescription\n\n\n\n\nevaluate\nEvaluate a model on training and validation datasets.\n\n\nevaluate_dataset\nHelper function to evaluate a single dataset.\n\n\n\n\n\nevaluate.evaluate(cfg, dataset_meta)\nEvaluate a model on training and validation datasets.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ndataset_meta\nTrainDatasetMeta\nDataset metadata containing training and evaluation datasets.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nDict[str, float]\nDictionary mapping metric names to their values.\n\n\n\n\n\n\n\nevaluate.evaluate_dataset(trainer, dataset, dataset_type, flash_optimum=False)\nHelper function to evaluate a single dataset.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntrainer\nTrainer\nThe trainer instance.\nrequired\n\n\ndataset\nDataset\nDataset to evaluate.\nrequired\n\n\ndataset_type\nstr\nType of dataset (‘train’ or ‘eval’).\nrequired\n\n\nflash_optimum\nbool\nWhether to use flash optimum.\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nOptional[Dict[str, float]]\nDictionary of metrics or None if dataset is None." 
}, { - "objectID": "docs/api/utils.ctx_managers.sequence_parallel.html#classes", - "href": "docs/api/utils.ctx_managers.sequence_parallel.html#classes", - "title": "utils.ctx_managers.sequence_parallel", + "objectID": "docs/api/prompt_strategies.completion.html", + "href": "docs/api/prompt_strategies.completion.html", + "title": "prompt_strategies.completion", "section": "", - "text": "Name\nDescription\n\n\n\n\nAllGatherWithGrad\nCustom autograd function for all-gather to preserve gradients.\n\n\nSequenceParallelContextManager\nContext manager for sequence parallelism operations.\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.AllGatherWithGrad()\nCustom autograd function for all-gather to preserve gradients.\n\n\n\n\n\nName\nDescription\n\n\n\n\nbackward\nBackward pass for all-gather operation.\n\n\nforward\nForward pass of all-gather of data with sequence dimension.\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.AllGatherWithGrad.backward(\n ctx,\n grad_output,\n)\nBackward pass for all-gather operation.\nExtracts the gradient slice corresponding to this rank’s original input\nfrom the full gradient tensor.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nctx\ntorch.autograd.function.FunctionCtx\ntorch.autograd function context.\nrequired\n\n\ngrad_output\ntorch.Tensor\nGradient from subsequent layers with respect to the concatenated output tensor.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[torch.Tensor, None]\nTuple containing the gradient slice for this rank’s input tensor and None for the process group parameter which doesn’t require gradients.\n\n\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.AllGatherWithGrad.forward(\n ctx,\n input_tensor,\n group,\n)\nForward pass of all-gather of data with sequence dimension.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nctx\ntorch.autograd.function.FunctionCtx\ntorch.autograd function context.\nrequired\n\n\ninput_tensor\ntorch.Tensor\nTensor 
from model output with sequence dimension.\nrequired\n\n\ngroup\ndist.ProcessGroup\ntorch.distributed process group.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntorch.Tensor\nTensor from gathering the input_tensor from across the process group and concatenating along the sequence dimension.\n\n\n\n\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.SequenceParallelContextManager(\n self,\n models,\n sequence_parallel_degree,\n gradient_accumulation_steps,\n ring_attn_func,\n heads_k_stride,\n)\nContext manager for sequence parallelism operations.\nThis class provides a context that will automatically apply sequence parallelism\nduring model forward passes using a pre-forward hook, and gather outputs from\nacross the sequence parallelism group using a post-forward hook.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nmodels\nlist[nn.Module]\nList of models to apply sequence parallelism to pre- and post- forward hooks.\nrequired\n\n\nsequence_parallel_degree\nint\nNumber of processes to split sequences over.\nrequired\n\n\ngradient_accumulation_steps\nint\nNumber of steps to accumulate gradients over.\nrequired\n\n\nring_attn_func\nRingAttnFunc\nWhich ring attention function to use. Currently unused.\nrequired\n\n\nheads_k_stride\nint | None\nSequence parallelism K head stride size. 
Passed through to varlen_llama3 ring_flash_attn implementation.\nrequired" + "text": "prompt_strategies.completion\nBasic completion text\n\n\n\n\n\nName\nDescription\n\n\n\n\nCompletionPromptTokenizingStrategy\nTokenizing strategy for Completion prompts.\n\n\nCompletionPrompter\nPrompter for completion\n\n\n\n\n\nprompt_strategies.completion.CompletionPromptTokenizingStrategy(\n *args,\n max_length=None,\n **kwargs,\n)\nTokenizing strategy for Completion prompts.\n\n\n\nprompt_strategies.completion.CompletionPrompter()\nPrompter for completion" }, { - "objectID": "docs/api/utils.ctx_managers.sequence_parallel.html#functions", - "href": "docs/api/utils.ctx_managers.sequence_parallel.html#functions", - "title": "utils.ctx_managers.sequence_parallel", + "objectID": "docs/api/prompt_strategies.completion.html#classes", + "href": "docs/api/prompt_strategies.completion.html#classes", + "title": "prompt_strategies.completion", "section": "", - "text": "Name\nDescription\n\n\n\n\napply_sequence_parallelism\nApply sequence parallelism slicing to a batch.\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.apply_sequence_parallelism(\n batch,\n local_rank,\n local_world_size,\n gradient_accumulation_steps,\n ring_attn_func,\n)\nApply sequence parallelism slicing to a batch.\nSpecial handling is implemented for integer logits_to_keep, which indicates\nto only keep the last N tokens in the sequence during generation.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nbatch\ndict[str, torch.Tensor]\nBatch dictionary (e.g., input_ids, attention_mask, etc.).\nrequired\n\n\nlocal_rank\nint\nLocal rank in the sequence parallel group.\nrequired\n\n\nlocal_world_size\nint\nWorld size of the sequence parallel group.\nrequired\n\n\ngradient_accumulation_steps\nint\nNumber of steps to accumulate gradients over.\nrequired\n\n\nring_attn_func\nRingAttnFunc\nWhich ring attention function to use. 
Currently unused, but related to above TODO.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[dict[str, torch.Tensor], int, int]\ntuple of: - Batch dictionary with sliced tensors. - The original sequence length before padding. - The number of padding tokens added." + "text": "Name\nDescription\n\n\n\n\nCompletionPromptTokenizingStrategy\nTokenizing strategy for Completion prompts.\n\n\nCompletionPrompter\nPrompter for completion\n\n\n\n\n\nprompt_strategies.completion.CompletionPromptTokenizingStrategy(\n *args,\n max_length=None,\n **kwargs,\n)\nTokenizing strategy for Completion prompts.\n\n\n\nprompt_strategies.completion.CompletionPrompter()\nPrompter for completion" }, { - "objectID": "docs/api/core.trainers.grpo.sampler.html", - "href": "docs/api/core.trainers.grpo.sampler.html", - "title": "core.trainers.grpo.sampler", + "objectID": "docs/api/prompt_strategies.orpo.chat_template.html", + "href": "docs/api/prompt_strategies.orpo.chat_template.html", + "title": "prompt_strategies.orpo.chat_template", "section": "", - "text": "core.trainers.grpo.sampler\nRepeat random sampler (similar to the one implemented in\nhttps://github.com/huggingface/trl/blob/main/trl/trainer/grpo_trainer.py) that adds\nsequence parallelism functionality; i.e., duplicating data across ranks in the same\nsequence parallel group.\n\n\n\n\n\nName\nDescription\n\n\n\n\nSequenceParallelRepeatRandomSampler\nSampler for GRPO training with sequence parallelism.\n\n\n\n\n\ncore.trainers.grpo.sampler.SequenceParallelRepeatRandomSampler(\n self,\n dataset,\n mini_repeat_count,\n world_size,\n rank,\n batch_size=1,\n repeat_count=1,\n sequence_parallel_degree=1,\n shuffle=True,\n seed=0,\n drop_last=False,\n)\nSampler for GRPO training with sequence parallelism.\nThis sampler ensures:\n- Ranks in the same sequence parallel (SP) group receive identical data.\n- Each index is repeated multiple times for sampling different completions.\n- Entire batches are repeated for 
reuse in multiple updates.\n- Data is properly distributed across SP groups.\nIn the table below, the values represent dataset indices. Each SP group has\nsequence_parallel_degree = 2 GPUs working together on the same data. There are 2\nSP groups (SP0 and SP1), with world_size = 4 total GPUs.\n Sequence Parallel Groups\n | SP0 | SP1 |\n | GPU 0 | GPU 1 | GPU 2 | GPU 3 |\n global_step step <---> mini_repeat_count=3\n <----------> batch_size=2 per SP group\ngrad_accum=2 ▲ ▲ 0 0 [0 0 0 1 1 1] [2 2 2 3 3 3] <- SP groups get different data\n▼ | 0 1 [0 0 0 1 1 1] [2 2 2 3 3 3] <- Same data for each SP group GPU\n|\n| 1 2 [0 0 0 1 1 1] [2 2 2 3 3 3] <- Repeat same indices for iterations\nnum_iterations=2 ▼ 1 3 [0 0 0 1 1 1] [2 2 2 3 3 3] <- When using gradient accumulation\n 2 4 [4 4 4 5 5 5] [6 6 6 7 7 7] <- New batch of data indices\n 2 5 [4 4 4 5 5 5] [6 6 6 7 7 7]\n ...\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ndataset\nSized\nDataset to sample from.\nrequired\n\n\nmini_repeat_count\nint\nHow many times to repeat each sample immediately.\nrequired\n\n\nworld_size\nint\nTotal number of processes.\nrequired\n\n\nrank\nint\nRank of current process.\nrequired\n\n\nbatch_size\nint\nNumber of samples per batch.\n1\n\n\nrepeat_count\nint\nHow many times to repeat the full sampling process.\n1\n\n\nsequence_parallel_degree\nint\nNumber of ranks in a sequence parallel group.\n1\n\n\nshuffle\nbool\nWhether to shuffle the dataset.\nTrue\n\n\nseed\nint\nRandom seed for shuffling.\n0\n\n\ndrop_last\nbool\nWhether to drop the last incomplete batch.\nFalse\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nset_epoch\nSets the epoch for this sampler.\n\n\n\n\n\ncore.trainers.grpo.sampler.SequenceParallelRepeatRandomSampler.set_epoch(epoch)\nSets the epoch for this sampler.\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nepoch\nint\nEpoch number to use for shuffling.\nrequired" + "text": "prompt_strategies.orpo.chat_template\nchatml prompt tokenization strategy 
for ORPO\n\n\n\n\n\nName\nDescription\n\n\n\n\nMessage\nmessage/turn\n\n\nMessageList\nconversation\n\n\nORPODatasetParsingStrategy\nStrategy to parse chosen rejected dataset into messagelist\n\n\nORPOPrompter\nSingle Turn prompter for ORPO\n\n\nORPOTokenizingStrategy\nrejected_input_ids\n\n\n\n\n\nprompt_strategies.orpo.chat_template.Message()\nmessage/turn\n\n\n\nprompt_strategies.orpo.chat_template.MessageList()\nconversation\n\n\n\nprompt_strategies.orpo.chat_template.ORPODatasetParsingStrategy()\nStrategy to parse chosen rejected dataset into messagelist\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_chosen_conversation_thread\nDataset structure mappings\n\n\nget_prompt\nMap the data to extract everything up to the last turn\n\n\nget_rejected_conversation_thread\nDataset structure mappings\n\n\n\n\n\nprompt_strategies.orpo.chat_template.ORPODatasetParsingStrategy.get_chosen_conversation_thread(\n prompt,\n)\nDataset structure mappings\n\n\n\nprompt_strategies.orpo.chat_template.ORPODatasetParsingStrategy.get_prompt(\n prompt,\n)\nMap the data to extract everything up to the last turn\n\n\n\nprompt_strategies.orpo.chat_template.ORPODatasetParsingStrategy.get_rejected_conversation_thread(\n prompt,\n)\nDataset structure mappings\n\n\n\n\n\nprompt_strategies.orpo.chat_template.ORPOPrompter(chat_template, tokenizer)\nSingle Turn prompter for ORPO\n\n\n\nprompt_strategies.orpo.chat_template.ORPOTokenizingStrategy(\n *args,\n dataset_parser=None,\n **kwargs,\n)\nrejected_input_ids\ninput_ids\nrejected_attention_mask\nattention_mask\nrejected_labels\nlabels\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nload\nchatml transforms for datasets with system, input, chosen, rejected\n\n\n\n\n\nprompt_strategies.orpo.chat_template.load(tokenizer, cfg, ds_cfg=None, **kwargs)\nchatml transforms for datasets with system, input, chosen, rejected" }, { - "objectID": "docs/api/core.trainers.grpo.sampler.html#classes", - "href": "docs/api/core.trainers.grpo.sampler.html#classes", - "title": 
"core.trainers.grpo.sampler", + "objectID": "docs/api/prompt_strategies.orpo.chat_template.html#classes", + "href": "docs/api/prompt_strategies.orpo.chat_template.html#classes", + "title": "prompt_strategies.orpo.chat_template", "section": "", - "text": "Name\nDescription\n\n\n\n\nSequenceParallelRepeatRandomSampler\nSampler for GRPO training with sequence parallelism.\n\n\n\n\n\ncore.trainers.grpo.sampler.SequenceParallelRepeatRandomSampler(\n self,\n dataset,\n mini_repeat_count,\n world_size,\n rank,\n batch_size=1,\n repeat_count=1,\n sequence_parallel_degree=1,\n shuffle=True,\n seed=0,\n drop_last=False,\n)\nSampler for GRPO training with sequence parallelism.\nThis sampler ensures:\n- Ranks in the same sequence parallel (SP) group receive identical data.\n- Each index is repeated multiple times for sampling different completions.\n- Entire batches are repeated for reuse in multiple updates.\n- Data is properly distributed across SP groups.\nIn the table below, the values represent dataset indices. Each SP group has\nsequence_parallel_degree = 2 GPUs working together on the same data. 
There are 2\nSP groups (SP0 and SP1), with world_size = 4 total GPUs.\n Sequence Parallel Groups\n | SP0 | SP1 |\n | GPU 0 | GPU 1 | GPU 2 | GPU 3 |\n global_step step <---> mini_repeat_count=3\n <----------> batch_size=2 per SP group\ngrad_accum=2 ▲ ▲ 0 0 [0 0 0 1 1 1] [2 2 2 3 3 3] <- SP groups get different data\n▼ | 0 1 [0 0 0 1 1 1] [2 2 2 3 3 3] <- Same data for each SP group GPU\n|\n| 1 2 [0 0 0 1 1 1] [2 2 2 3 3 3] <- Repeat same indices for iterations\nnum_iterations=2 ▼ 1 3 [0 0 0 1 1 1] [2 2 2 3 3 3] <- When using gradient accumulation\n 2 4 [4 4 4 5 5 5] [6 6 6 7 7 7] <- New batch of data indices\n 2 5 [4 4 4 5 5 5] [6 6 6 7 7 7]\n ...\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ndataset\nSized\nDataset to sample from.\nrequired\n\n\nmini_repeat_count\nint\nHow many times to repeat each sample immediately.\nrequired\n\n\nworld_size\nint\nTotal number of processes.\nrequired\n\n\nrank\nint\nRank of current process.\nrequired\n\n\nbatch_size\nint\nNumber of samples per batch.\n1\n\n\nrepeat_count\nint\nHow many times to repeat the full sampling process.\n1\n\n\nsequence_parallel_degree\nint\nNumber of ranks in a sequence parallel group.\n1\n\n\nshuffle\nbool\nWhether to shuffle the dataset.\nTrue\n\n\nseed\nint\nRandom seed for shuffling.\n0\n\n\ndrop_last\nbool\nWhether to drop the last incomplete batch.\nFalse\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nset_epoch\nSets the epoch for this sampler.\n\n\n\n\n\ncore.trainers.grpo.sampler.SequenceParallelRepeatRandomSampler.set_epoch(epoch)\nSets the epoch for this sampler.\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nepoch\nint\nEpoch number to use for shuffling.\nrequired" + "text": "Name\nDescription\n\n\n\n\nMessage\nmessage/turn\n\n\nMessageList\nconversation\n\n\nORPODatasetParsingStrategy\nStrategy to parse chosen rejected dataset into messagelist\n\n\nORPOPrompter\nSingle Turn prompter for 
ORPO\n\n\nORPOTokenizingStrategy\nrejected_input_ids\n\n\n\n\n\nprompt_strategies.orpo.chat_template.Message()\nmessage/turn\n\n\n\nprompt_strategies.orpo.chat_template.MessageList()\nconversation\n\n\n\nprompt_strategies.orpo.chat_template.ORPODatasetParsingStrategy()\nStrategy to parse chosen rejected dataset into messagelist\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_chosen_conversation_thread\nDataset structure mappings\n\n\nget_prompt\nMap the data to extract everything up to the last turn\n\n\nget_rejected_conversation_thread\nDataset structure mappings\n\n\n\n\n\nprompt_strategies.orpo.chat_template.ORPODatasetParsingStrategy.get_chosen_conversation_thread(\n prompt,\n)\nDataset structure mappings\n\n\n\nprompt_strategies.orpo.chat_template.ORPODatasetParsingStrategy.get_prompt(\n prompt,\n)\nMap the data to extract everything up to the last turn\n\n\n\nprompt_strategies.orpo.chat_template.ORPODatasetParsingStrategy.get_rejected_conversation_thread(\n prompt,\n)\nDataset structure mappings\n\n\n\n\n\nprompt_strategies.orpo.chat_template.ORPOPrompter(chat_template, tokenizer)\nSingle Turn prompter for ORPO\n\n\n\nprompt_strategies.orpo.chat_template.ORPOTokenizingStrategy(\n *args,\n dataset_parser=None,\n **kwargs,\n)\nrejected_input_ids\ninput_ids\nrejected_attention_mask\nattention_mask\nrejected_labels\nlabels" }, { - "objectID": "docs/api/prompt_strategies.user_defined.html", - "href": "docs/api/prompt_strategies.user_defined.html", - "title": "prompt_strategies.user_defined", + "objectID": "docs/api/prompt_strategies.orpo.chat_template.html#functions", + "href": "docs/api/prompt_strategies.orpo.chat_template.html#functions", + "title": "prompt_strategies.orpo.chat_template", "section": "", - "text": "prompt_strategies.user_defined\nUser Defined prompts with configuration from the YML config\n\n\n\n\n\nName\nDescription\n\n\n\n\nUserDefinedDatasetConfig\ndataclass configuration representing a userdefined dataset 
type\n\n\nUserDefinedPromptTokenizationStrategy\nPrompt Tokenization Strategy for user defined prompts\n\n\n\n\n\nprompt_strategies.user_defined.UserDefinedDatasetConfig(\n self,\n system_prompt='',\n field_system='system',\n field_instruction='instruction',\n field_input='input',\n field_output='output',\n format='{instruction} {input} ',\n no_input_format='{instruction} ',\n system_format='{system}',\n)\ndataclass configuration representing a userdefined dataset type\n\n\n\nprompt_strategies.user_defined.UserDefinedPromptTokenizationStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nPrompt Tokenization Strategy for user defined prompts" + "text": "Name\nDescription\n\n\n\n\nload\nchatml transforms for datasets with system, input, chosen, rejected\n\n\n\n\n\nprompt_strategies.orpo.chat_template.load(tokenizer, cfg, ds_cfg=None, **kwargs)\nchatml transforms for datasets with system, input, chosen, rejected" }, { - "objectID": "docs/api/prompt_strategies.user_defined.html#classes", - "href": "docs/api/prompt_strategies.user_defined.html#classes", - "title": "prompt_strategies.user_defined", + "objectID": "docs/api/core.trainers.mixins.optimizer.html", + "href": "docs/api/core.trainers.mixins.optimizer.html", + "title": "core.trainers.mixins.optimizer", "section": "", - "text": "Name\nDescription\n\n\n\n\nUserDefinedDatasetConfig\ndataclass configuration representing a userdefined dataset type\n\n\nUserDefinedPromptTokenizationStrategy\nPrompt Tokenization Strategy for user defined prompts\n\n\n\n\n\nprompt_strategies.user_defined.UserDefinedDatasetConfig(\n self,\n system_prompt='',\n field_system='system',\n field_instruction='instruction',\n field_input='input',\n field_output='output',\n format='{instruction} {input} ',\n no_input_format='{instruction} ',\n system_format='{system}',\n)\ndataclass configuration representing a userdefined dataset 
type\n\n\n\nprompt_strategies.user_defined.UserDefinedPromptTokenizationStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nPrompt Tokenization Strategy for user defined prompts" + "text": "core.trainers.mixins.optimizer\nModule for Axolotl trainer optimizer mixin\n\n\n\n\n\nName\nDescription\n\n\n\n\nOptimizerInitMixin\nMixin to handle common optimizer initialization logic for Trainers (mostly TRL) that do not\n\n\nOptimizerMixin\nMixin class for shared handling of building custom optimizers\n\n\n\n\n\ncore.trainers.mixins.optimizer.OptimizerInitMixin(*args, **kwargs)\nMixin to handle common optimizer initialization logic for Trainers (mostly TRL) that do not\naccept optimizer_cls_and_kwargs as kwarg in constructor.\n\n\n\ncore.trainers.mixins.optimizer.OptimizerMixin()\nMixin class for shared handling of building custom optimizers" }, { - "objectID": "docs/api/monkeypatch.llama_attn_hijack_flash.html", - "href": "docs/api/monkeypatch.llama_attn_hijack_flash.html", - "title": "monkeypatch.llama_attn_hijack_flash", + "objectID": "docs/api/core.trainers.mixins.optimizer.html#classes", + "href": "docs/api/core.trainers.mixins.optimizer.html#classes", + "title": "core.trainers.mixins.optimizer", "section": "", - "text": "monkeypatch.llama_attn_hijack_flash\nFlash attention monkey patch for llama model\n\n\n\n\n\nName\nDescription\n\n\n\n\nFusedAttention\nFused QKV Attention layer for incrementally improved training efficiency\n\n\nLlamaDecoderLayer\npatched version of LlamaDecoderLayer to pass through the precalculated cu_seqlens\n\n\n\n\n\nmonkeypatch.llama_attn_hijack_flash.FusedAttention(self, config, q, k, v, o)\nFused QKV Attention layer for incrementally improved training efficiency\n\n\n\nmonkeypatch.llama_attn_hijack_flash.LlamaDecoderLayer()\npatched version of LlamaDecoderLayer to pass through the precalculated 
cu_seqlens\n\n\n\n\n\nName\nDescription\n\n\n\n\nforward\n\n\n\n\n\n\nmonkeypatch.llama_attn_hijack_flash.LlamaDecoderLayer.forward(\n hidden_states,\n attention_mask=None,\n position_ids=None,\n past_key_value=None,\n output_attentions=False,\n use_cache=False,\n padding_mask=None,\n cu_seqlens=None,\n max_seqlen=None,\n)\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nhidden_states\ntorch.FloatTensor\ninput to the layer of shape (batch, seq_len, embed_dim)\nrequired\n\n\nattention_mask\ntorch.FloatTensor, optional\nattention mask of size (batch, 1, tgt_len, src_len) where padding elements are indicated by very large negative values.\nNone\n\n\noutput_attentions\nbool, optional\nWhether or not to return the attentions tensors of all attention layers. See attentions under returned tensors for more detail.\nFalse\n\n\nuse_cache\nbool, optional\nIf set to True, past_key_values key value states are returned and can be used to speed up decoding (see past_key_values).\nFalse\n\n\npast_key_value\nTuple(torch.FloatTensor), optional\ncached past key and value projection states\nNone\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nflashattn_forward\nInput shape: Batch x Time x Channel\n\n\nflashattn_forward_with_s2attn\nInput shape: Batch x Time x Channel\n\n\ngenerate_qkv\n\n\n\n\n\n\nmonkeypatch.llama_attn_hijack_flash.flashattn_forward(\n self,\n hidden_states,\n attention_mask=None,\n position_ids=None,\n past_key_value=None,\n output_attentions=False,\n use_cache=False,\n padding_mask=None,\n cu_seqlens=None,\n max_seqlen=None,\n)\nInput shape: Batch x Time x Channel\nattention_mask: [bsz, q_len]\n\n\n\nmonkeypatch.llama_attn_hijack_flash.flashattn_forward_with_s2attn(\n self,\n hidden_states,\n attention_mask=None,\n position_ids=None,\n past_key_value=None,\n output_attentions=False,\n use_cache=False,\n padding_mask=None,\n cu_seqlens=None,\n max_seqlen=None,\n)\nInput shape: Batch x Time x Channel\nFrom: 
https://github.com/dvlab-research/LongLoRA/blob/main/llama_attn_replace.py\nattention_mask: [bsz, q_len]\ncu_seqlens will be ignored if provided\nmax_seqlen will be ignored if provided\n\n\n\nmonkeypatch.llama_attn_hijack_flash.generate_qkv(\n q,\n k,\n v,\n query_padding_mask=None,\n key_padding_mask=None,\n kvpacked=False,\n qkvpacked=False,\n)\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nq\n\n(batch_size, seqlen_q, nheads, d)\nrequired\n\n\nk\n\n(batch_size, seqlen_k, nheads_k, d)\nrequired\n\n\nv\n\n(batch_size, seqlen_k, nheads_k, d)\nrequired\n\n\nquery_padding_mask\n\n(batch_size, seqlen), bool\nNone\n\n\nkey_padding_mask\n\n(batch_size, seqlen), bool\nNone" + "text": "Name\nDescription\n\n\n\n\nOptimizerInitMixin\nMixin to handle common optimizer initialization logic for Trainers (mostly TRL) that do not\n\n\nOptimizerMixin\nMixin class for shared handling of building custom optimizers\n\n\n\n\n\ncore.trainers.mixins.optimizer.OptimizerInitMixin(*args, **kwargs)\nMixin to handle common optimizer initialization logic for Trainers (mostly TRL) that do not\naccept optimizer_cls_and_kwargs as kwarg in constructor.\n\n\n\ncore.trainers.mixins.optimizer.OptimizerMixin()\nMixin class for shared handling of building custom optimizers" }, { - "objectID": "docs/api/monkeypatch.llama_attn_hijack_flash.html#classes", - "href": "docs/api/monkeypatch.llama_attn_hijack_flash.html#classes", - "title": "monkeypatch.llama_attn_hijack_flash", + "objectID": "docs/api/datasets.html", + "href": "docs/api/datasets.html", + "title": "datasets", "section": "", - "text": "Name\nDescription\n\n\n\n\nFusedAttention\nFused QKV Attention layer for incrementally improved training efficiency\n\n\nLlamaDecoderLayer\npatched version of LlamaDecoderLayer to pass through the precalculated cu_seqlens\n\n\n\n\n\nmonkeypatch.llama_attn_hijack_flash.FusedAttention(self, config, q, k, v, o)\nFused QKV Attention layer for incrementally improved training 
efficiency\n\n\n\nmonkeypatch.llama_attn_hijack_flash.LlamaDecoderLayer()\npatched version of LlamaDecoderLayer to pass through the precalculated cu_seqlens\n\n\n\n\n\nName\nDescription\n\n\n\n\nforward\n\n\n\n\n\n\nmonkeypatch.llama_attn_hijack_flash.LlamaDecoderLayer.forward(\n hidden_states,\n attention_mask=None,\n position_ids=None,\n past_key_value=None,\n output_attentions=False,\n use_cache=False,\n padding_mask=None,\n cu_seqlens=None,\n max_seqlen=None,\n)\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nhidden_states\ntorch.FloatTensor\ninput to the layer of shape (batch, seq_len, embed_dim)\nrequired\n\n\nattention_mask\ntorch.FloatTensor, optional\nattention mask of size (batch, 1, tgt_len, src_len) where padding elements are indicated by very large negative values.\nNone\n\n\noutput_attentions\nbool, optional\nWhether or not to return the attentions tensors of all attention layers. See attentions under returned tensors for more detail.\nFalse\n\n\nuse_cache\nbool, optional\nIf set to True, past_key_values key value states are returned and can be used to speed up decoding (see past_key_values).\nFalse\n\n\npast_key_value\nTuple(torch.FloatTensor), optional\ncached past key and value projection states\nNone" + "text": "datasets\nModule containing Dataset functionality\n\n\n\n\n\nName\nDescription\n\n\n\n\nConstantLengthDataset\nIterable dataset that returns constant length chunks of tokens from stream of text files.\n\n\nTokenizedPromptDataset\nDataset that returns tokenized prompts from a stream of text files.\n\n\n\n\n\ndatasets.ConstantLengthDataset(tokenizer, datasets, seq_length=2048)\nIterable dataset that returns constant length chunks of tokens from stream of text files.\nArgs:\ntokenizer (Tokenizer): The processor used for processing the data.\ndataset (dataset.Dataset): Dataset with text files.\nseq_length (int): Length of token sequences to return.\n\n\n\ndatasets.TokenizedPromptDataset(\n prompt_tokenizer,\n dataset,\n 
process_count=None,\n keep_in_memory=False,\n **kwargs,\n)\nDataset that returns tokenized prompts from a stream of text files.\nArgs:\nprompt_tokenizer (PromptTokenizingStrategy): The prompt tokenizing method for processing the data.\ndataset (dataset.Dataset): Dataset with text files.\nprocess_count (int): Number of processes to use for tokenizing.\nkeep_in_memory (bool): Whether to keep the tokenized dataset in memory." }, { - "objectID": "docs/api/monkeypatch.llama_attn_hijack_flash.html#functions", - "href": "docs/api/monkeypatch.llama_attn_hijack_flash.html#functions", - "title": "monkeypatch.llama_attn_hijack_flash", + "objectID": "docs/api/datasets.html#classes", + "href": "docs/api/datasets.html#classes", + "title": "datasets", "section": "", - "text": "Name\nDescription\n\n\n\n\nflashattn_forward\nInput shape: Batch x Time x Channel\n\n\nflashattn_forward_with_s2attn\nInput shape: Batch x Time x Channel\n\n\ngenerate_qkv\n\n\n\n\n\n\nmonkeypatch.llama_attn_hijack_flash.flashattn_forward(\n self,\n hidden_states,\n attention_mask=None,\n position_ids=None,\n past_key_value=None,\n output_attentions=False,\n use_cache=False,\n padding_mask=None,\n cu_seqlens=None,\n max_seqlen=None,\n)\nInput shape: Batch x Time x Channel\nattention_mask: [bsz, q_len]\n\n\n\nmonkeypatch.llama_attn_hijack_flash.flashattn_forward_with_s2attn(\n self,\n hidden_states,\n attention_mask=None,\n position_ids=None,\n past_key_value=None,\n output_attentions=False,\n use_cache=False,\n padding_mask=None,\n cu_seqlens=None,\n max_seqlen=None,\n)\nInput shape: Batch x Time x Channel\nFrom: https://github.com/dvlab-research/LongLoRA/blob/main/llama_attn_replace.py\nattention_mask: [bsz, q_len]\ncu_seqlens will be ignored if provided\nmax_seqlen will be ignored if provided\n\n\n\nmonkeypatch.llama_attn_hijack_flash.generate_qkv(\n q,\n k,\n v,\n query_padding_mask=None,\n key_padding_mask=None,\n kvpacked=False,\n 
qkvpacked=False,\n)\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nq\n\n(batch_size, seqlen_q, nheads, d)\nrequired\n\n\nk\n\n(batch_size, seqlen_k, nheads_k, d)\nrequired\n\n\nv\n\n(batch_size, seqlen_k, nheads_k, d)\nrequired\n\n\nquery_padding_mask\n\n(batch_size, seqlen), bool\nNone\n\n\nkey_padding_mask\n\n(batch_size, seqlen), bool\nNone" - }, - { - "objectID": "docs/api/cli.train.html", - "href": "docs/api/cli.train.html", - "title": "cli.train", - "section": "", - "text": "cli.train\nCLI to run training on a model.\n\n\n\n\n\nName\nDescription\n\n\n\n\ndo_cli\nParses axolotl config, CLI args, and calls do_train.\n\n\ndo_train\nTrains a transformers model by first loading the dataset(s) specified in the\n\n\n\n\n\ncli.train.do_cli(config=Path('examples/'), **kwargs)\nParses axolotl config, CLI args, and calls do_train.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nconfig\nUnion[Path, str]\nPath to axolotl config YAML file.\nPath('examples/')\n\n\nkwargs\n\nAdditional keyword arguments to override config file values.\n{}\n\n\n\n\n\n\n\ncli.train.do_train(cfg, cli_args)\nTrains a transformers model by first loading the dataset(s) specified in the\naxolotl config, and then calling axolotl.train.train. 
Also runs the plugin\nmanager’s post_train_unload once training completes.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ncli_args\nTrainerCliArgs\nTraining-specific CLI arguments.\nrequired" - }, - { - "objectID": "docs/api/cli.train.html#functions", - "href": "docs/api/cli.train.html#functions", - "title": "cli.train", - "section": "", - "text": "Name\nDescription\n\n\n\n\ndo_cli\nParses axolotl config, CLI args, and calls do_train.\n\n\ndo_train\nTrains a transformers model by first loading the dataset(s) specified in the\n\n\n\n\n\ncli.train.do_cli(config=Path('examples/'), **kwargs)\nParses axolotl config, CLI args, and calls do_train.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nconfig\nUnion[Path, str]\nPath to axolotl config YAML file.\nPath('examples/')\n\n\nkwargs\n\nAdditional keyword arguments to override config file values.\n{}\n\n\n\n\n\n\n\ncli.train.do_train(cfg, cli_args)\nTrains a transformers model by first loading the dataset(s) specified in the\naxolotl config, and then calling axolotl.train.train. 
Also runs the plugin\nmanager’s post_train_unload once training completes.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ncli_args\nTrainerCliArgs\nTraining-specific CLI arguments.\nrequired" - }, - { - "objectID": "docs/api/cli.quantize.html", - "href": "docs/api/cli.quantize.html", - "title": "cli.quantize", - "section": "", - "text": "cli.quantize\nCLI to post-training quantize a model using torchao\n\n\n\n\n\nName\nDescription\n\n\n\n\ndo_quantize\nQuantizes a model’s model’s weights\n\n\n\n\n\ncli.quantize.do_quantize(config, cli_args)\nQuantizes a model’s model’s weights\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nconfig\nUnion[Path, str]\nThe path to the config file\nrequired\n\n\ncli_args\ndict\nAdditional command-line arguments\nrequired" - }, - { - "objectID": "docs/api/cli.quantize.html#functions", - "href": "docs/api/cli.quantize.html#functions", - "title": "cli.quantize", - "section": "", - "text": "Name\nDescription\n\n\n\n\ndo_quantize\nQuantizes a model’s model’s weights\n\n\n\n\n\ncli.quantize.do_quantize(config, cli_args)\nQuantizes a model’s model’s weights\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nconfig\nUnion[Path, str]\nThe path to the config file\nrequired\n\n\ncli_args\ndict\nAdditional command-line arguments\nrequired" + "text": "Name\nDescription\n\n\n\n\nConstantLengthDataset\nIterable dataset that returns constant length chunks of tokens from stream of text files.\n\n\nTokenizedPromptDataset\nDataset that returns tokenized prompts from a stream of text files.\n\n\n\n\n\ndatasets.ConstantLengthDataset(tokenizer, datasets, seq_length=2048)\nIterable dataset that returns constant length chunks of tokens from stream of text files.\nArgs:\ntokenizer (Tokenizer): The processor used for processing the data.\ndataset (dataset.Dataset): Dataset with text files.\nseq_length (int): Length of token 
sequences to return.\n\n\n\ndatasets.TokenizedPromptDataset(\n prompt_tokenizer,\n dataset,\n process_count=None,\n keep_in_memory=False,\n **kwargs,\n)\nDataset that returns tokenized prompts from a stream of text files.\nArgs:\nprompt_tokenizer (PromptTokenizingStrategy): The prompt tokenizing method for processing the data.\ndataset (dataset.Dataset): Dataset with text files.\nprocess_count (int): Number of processes to use for tokenizing.\nkeep_in_memory (bool): Whether to keep the tokenized dataset in memory." }, { "objectID": "docs/api/cli.utils.html", @@ -2167,375 +2160,403 @@ "text": "Acknowledgements\nPortions of this Cut Cross Entropy Software may utilize the following copyrighted\nmaterial, the use of which is hereby acknowledged.\n\nPyTorch\nFrom PyTorch:\n\nCopyright (c) 2016- Facebook, Inc (Adam Paszke)\nCopyright (c) 2014- Facebook, Inc (Soumith Chintala)\nCopyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\nCopyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\nCopyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\nCopyright (c) 2011-2013 NYU (Clement Farabet)\nCopyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\nCopyright (c) 2006 Idiap Research Institute (Samy Bengio)\nCopyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n\nFrom Caffe2:\n\nCopyright (c) 2016-present, Facebook Inc. 
All rights reserved.\n\nAll contributions by Facebook:\nCopyright (c) 2016 Facebook Inc.\n\nAll contributions by Google:\nCopyright (c) 2015 Google Inc.\nAll rights reserved.\n\nAll contributions by Yangqing Jia:\nCopyright (c) 2015 Yangqing Jia\nAll rights reserved.\n\nAll contributions by Kakao Brain:\nCopyright 2019-2020 Kakao Brain\n\nAll contributions by Cruise LLC:\nCopyright (c) 2022 Cruise LLC.\nAll rights reserved.\n\nAll contributions by Arm:\nCopyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates\n\nAll contributions from Caffe:\nCopyright(c) 2013, 2014, 2015, the respective contributors\nAll rights reserved.\n\nAll other contributions:\nCopyright(c) 2015, 2016 the respective contributors\nAll rights reserved.\n\nCaffe2 uses a copyright model similar to Caffe: each contributor holds\ncopyright over their contributions to Caffe2. The project versioning records\nall such contribution and copyright details. If a contributor wants to further\nmark their specific copyright on a particular contribution, they should\nindicate their copyright solely in the commit message of the change when it is\ncommitted.\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\n3. 
Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America\nand IDIAP Research Institute nor the names of its contributors may be\nused to endorse or promote products derived from this software without\nspecific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\nTriton\n/*\n* Copyright 2018-2020 Philippe Tillet\n* Copyright 2020-2022 OpenAI\n*\n* Permission is hereby granted, free of charge, to any person obtaining\n* a copy of this software and associated documentation files\n* (the \"Software\"), to deal in the Software without restriction,\n* including without limitation the rights to use, copy, modify, merge,\n* publish, distribute, sublicense, and/or sell copies of the Software,\n* and to permit persons to whom the Software is furnished to do so,\n* subject to the following conditions:\n*\n* The above copyright notice and this permission notice shall be\n* included in all copies or substantial portions of the Software.\n*\n* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n* 
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n*/\nTransformers\nCopyright 2018- The Hugging Face team. All rights reserved.\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean 
any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of 
the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. 
Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License." 
}, { - "objectID": "docs/api/datasets.html", - "href": "docs/api/datasets.html", - "title": "datasets", + "objectID": "docs/api/core.builders.causal.html", + "href": "docs/api/core.builders.causal.html", + "title": "core.builders.causal", "section": "", - "text": "datasets\nModule containing Dataset functionality\n\n\n\n\n\nName\nDescription\n\n\n\n\nConstantLengthDataset\nIterable dataset that returns constant length chunks of tokens from stream of text files.\n\n\nTokenizedPromptDataset\nDataset that returns tokenized prompts from a stream of text files.\n\n\n\n\n\ndatasets.ConstantLengthDataset(self, tokenizer, datasets, seq_length=2048)\nIterable dataset that returns constant length chunks of tokens from stream of text files.\nArgs:\ntokenizer (Tokenizer): The processor used for processing the data.\ndataset (dataset.Dataset): Dataset with text files.\nseq_length (int): Length of token sequences to return.\n\n\n\ndatasets.TokenizedPromptDataset(\n self,\n prompt_tokenizer,\n dataset,\n process_count=None,\n keep_in_memory=False,\n **kwargs,\n)\nDataset that returns tokenized prompts from a stream of text files.\nArgs:\nprompt_tokenizer (PromptTokenizingStrategy): The prompt tokenizing method for processing the data.\ndataset (dataset.Dataset): Dataset with text files.\nprocess_count (int): Number of processes to use for tokenizing.\nkeep_in_memory (bool): Whether to keep the tokenized dataset in memory." + "text": "core.builders.causal\nBuilder for causal trainers\n\n\n\n\n\nName\nDescription\n\n\n\n\nHFCausalTrainerBuilder\nBuild the HuggingFace training args/trainer for causal models and reward modeling\n\n\n\n\n\ncore.builders.causal.HFCausalTrainerBuilder(\n cfg,\n model,\n tokenizer,\n processor=None,\n)\nBuild the HuggingFace training args/trainer for causal models and reward modeling\nusing TRL." 
}, { - "objectID": "docs/api/datasets.html#classes", - "href": "docs/api/datasets.html#classes", - "title": "datasets", + "objectID": "docs/api/core.builders.causal.html#classes", + "href": "docs/api/core.builders.causal.html#classes", + "title": "core.builders.causal", "section": "", - "text": "Name\nDescription\n\n\n\n\nConstantLengthDataset\nIterable dataset that returns constant length chunks of tokens from stream of text files.\n\n\nTokenizedPromptDataset\nDataset that returns tokenized prompts from a stream of text files.\n\n\n\n\n\ndatasets.ConstantLengthDataset(self, tokenizer, datasets, seq_length=2048)\nIterable dataset that returns constant length chunks of tokens from stream of text files.\nArgs:\ntokenizer (Tokenizer): The processor used for processing the data.\ndataset (dataset.Dataset): Dataset with text files.\nseq_length (int): Length of token sequences to return.\n\n\n\ndatasets.TokenizedPromptDataset(\n self,\n prompt_tokenizer,\n dataset,\n process_count=None,\n keep_in_memory=False,\n **kwargs,\n)\nDataset that returns tokenized prompts from a stream of text files.\nArgs:\nprompt_tokenizer (PromptTokenizingStrategy): The prompt tokenizing method for processing the data.\ndataset (dataset.Dataset): Dataset with text files.\nprocess_count (int): Number of processes to use for tokenizing.\nkeep_in_memory (bool): Whether to keep the tokenized dataset in memory." + "text": "Name\nDescription\n\n\n\n\nHFCausalTrainerBuilder\nBuild the HuggingFace training args/trainer for causal models and reward modeling\n\n\n\n\n\ncore.builders.causal.HFCausalTrainerBuilder(\n cfg,\n model,\n tokenizer,\n processor=None,\n)\nBuild the HuggingFace training args/trainer for causal models and reward modeling\nusing TRL." 
}, { - "objectID": "docs/api/core.trainers.mixins.optimizer.html", - "href": "docs/api/core.trainers.mixins.optimizer.html", - "title": "core.trainers.mixins.optimizer", + "objectID": "docs/api/cli.quantize.html", + "href": "docs/api/cli.quantize.html", + "title": "cli.quantize", "section": "", - "text": "core.trainers.mixins.optimizer\nModule for Axolotl trainer optimizer mixin\n\n\n\n\n\nName\nDescription\n\n\n\n\nOptimizerMixin\nMixin class for shared handling of building custom optimizers\n\n\n\n\n\ncore.trainers.mixins.optimizer.OptimizerMixin()\nMixin class for shared handling of building custom optimizers" + "text": "cli.quantize\nCLI to post-training quantize a model using torchao\n\n\n\n\n\nName\nDescription\n\n\n\n\ndo_quantize\nQuantizes a model’s model’s weights\n\n\n\n\n\ncli.quantize.do_quantize(config, cli_args)\nQuantizes a model’s model’s weights\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nconfig\nUnion[Path, str]\nThe path to the config file\nrequired\n\n\ncli_args\ndict\nAdditional command-line arguments\nrequired" }, { - "objectID": "docs/api/core.trainers.mixins.optimizer.html#classes", - "href": "docs/api/core.trainers.mixins.optimizer.html#classes", - "title": "core.trainers.mixins.optimizer", + "objectID": "docs/api/cli.quantize.html#functions", + "href": "docs/api/cli.quantize.html#functions", + "title": "cli.quantize", "section": "", - "text": "Name\nDescription\n\n\n\n\nOptimizerMixin\nMixin class for shared handling of building custom optimizers\n\n\n\n\n\ncore.trainers.mixins.optimizer.OptimizerMixin()\nMixin class for shared handling of building custom optimizers" + "text": "Name\nDescription\n\n\n\n\ndo_quantize\nQuantizes a model’s model’s weights\n\n\n\n\n\ncli.quantize.do_quantize(config, cli_args)\nQuantizes a model’s model’s weights\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nconfig\nUnion[Path, str]\nThe path to the config file\nrequired\n\n\ncli_args\ndict\nAdditional command-line 
arguments\nrequired" }, { - "objectID": "docs/api/prompt_strategies.orpo.chat_template.html", - "href": "docs/api/prompt_strategies.orpo.chat_template.html", - "title": "prompt_strategies.orpo.chat_template", + "objectID": "docs/api/cli.train.html", + "href": "docs/api/cli.train.html", + "title": "cli.train", "section": "", - "text": "prompt_strategies.orpo.chat_template\nchatml prompt tokenization strategy for ORPO\n\n\n\n\n\nName\nDescription\n\n\n\n\nMessage\nmessage/turn\n\n\nMessageList\nconversation\n\n\nORPODatasetParsingStrategy\nStrategy to parse chosen rejected dataset into messagelist\n\n\nORPOPrompter\nSingle Turn prompter for ORPO\n\n\nORPOTokenizingStrategy\nrejected_input_ids\n\n\n\n\n\nprompt_strategies.orpo.chat_template.Message()\nmessage/turn\n\n\n\nprompt_strategies.orpo.chat_template.MessageList()\nconversation\n\n\n\nprompt_strategies.orpo.chat_template.ORPODatasetParsingStrategy()\nStrategy to parse chosen rejected dataset into messagelist\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_chosen_conversation_thread\nDataset structure mappings\n\n\nget_prompt\nMap the data to extract everything up to the last turn\n\n\nget_rejected_conversation_thread\nDataset structure mappings\n\n\n\n\n\nprompt_strategies.orpo.chat_template.ORPODatasetParsingStrategy.get_chosen_conversation_thread(\n prompt,\n)\nDataset structure mappings\n\n\n\nprompt_strategies.orpo.chat_template.ORPODatasetParsingStrategy.get_prompt(\n prompt,\n)\nMap the data to extract everything up to the last turn\n\n\n\nprompt_strategies.orpo.chat_template.ORPODatasetParsingStrategy.get_rejected_conversation_thread(\n prompt,\n)\nDataset structure mappings\n\n\n\n\n\nprompt_strategies.orpo.chat_template.ORPOPrompter(\n self,\n chat_template,\n tokenizer,\n)\nSingle Turn prompter for ORPO\n\n\n\nprompt_strategies.orpo.chat_template.ORPOTokenizingStrategy(\n self,\n *args,\n dataset_parser=None,\n 
**kwargs,\n)\nrejected_input_ids\ninput_ids\nrejected_attention_mask\nattention_mask\nrejected_labels\nlabels\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nload\nchatml transforms for datasets with system, input, chosen, rejected\n\n\n\n\n\nprompt_strategies.orpo.chat_template.load(tokenizer, cfg, ds_cfg=None, **kwargs)\nchatml transforms for datasets with system, input, chosen, rejected" + "text": "cli.train\nCLI to run training on a model.\n\n\n\n\n\nName\nDescription\n\n\n\n\ndo_cli\nParses axolotl config, CLI args, and calls do_train.\n\n\ndo_train\nTrains a transformers model by first loading the dataset(s) specified in the\n\n\n\n\n\ncli.train.do_cli(config=Path('examples/'), **kwargs)\nParses axolotl config, CLI args, and calls do_train.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nconfig\nUnion[Path, str]\nPath to axolotl config YAML file.\nPath('examples/')\n\n\nkwargs\n\nAdditional keyword arguments to override config file values.\n{}\n\n\n\n\n\n\n\ncli.train.do_train(cfg, cli_args)\nTrains a transformers model by first loading the dataset(s) specified in the\naxolotl config, and then calling axolotl.train.train. 
Also runs the plugin\nmanager’s post_train_unload once training completes.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ncli_args\nTrainerCliArgs\nTraining-specific CLI arguments.\nrequired" }, { - "objectID": "docs/api/prompt_strategies.orpo.chat_template.html#classes", - "href": "docs/api/prompt_strategies.orpo.chat_template.html#classes", - "title": "prompt_strategies.orpo.chat_template", + "objectID": "docs/api/cli.train.html#functions", + "href": "docs/api/cli.train.html#functions", + "title": "cli.train", "section": "", - "text": "Name\nDescription\n\n\n\n\nMessage\nmessage/turn\n\n\nMessageList\nconversation\n\n\nORPODatasetParsingStrategy\nStrategy to parse chosen rejected dataset into messagelist\n\n\nORPOPrompter\nSingle Turn prompter for ORPO\n\n\nORPOTokenizingStrategy\nrejected_input_ids\n\n\n\n\n\nprompt_strategies.orpo.chat_template.Message()\nmessage/turn\n\n\n\nprompt_strategies.orpo.chat_template.MessageList()\nconversation\n\n\n\nprompt_strategies.orpo.chat_template.ORPODatasetParsingStrategy()\nStrategy to parse chosen rejected dataset into messagelist\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_chosen_conversation_thread\nDataset structure mappings\n\n\nget_prompt\nMap the data to extract everything up to the last turn\n\n\nget_rejected_conversation_thread\nDataset structure mappings\n\n\n\n\n\nprompt_strategies.orpo.chat_template.ORPODatasetParsingStrategy.get_chosen_conversation_thread(\n prompt,\n)\nDataset structure mappings\n\n\n\nprompt_strategies.orpo.chat_template.ORPODatasetParsingStrategy.get_prompt(\n prompt,\n)\nMap the data to extract everything up to the last turn\n\n\n\nprompt_strategies.orpo.chat_template.ORPODatasetParsingStrategy.get_rejected_conversation_thread(\n prompt,\n)\nDataset structure mappings\n\n\n\n\n\nprompt_strategies.orpo.chat_template.ORPOPrompter(\n self,\n chat_template,\n tokenizer,\n)\nSingle Turn prompter 
for ORPO\n\n\n\nprompt_strategies.orpo.chat_template.ORPOTokenizingStrategy(\n self,\n *args,\n dataset_parser=None,\n **kwargs,\n)\nrejected_input_ids\ninput_ids\nrejected_attention_mask\nattention_mask\nrejected_labels\nlabels" + "text": "Name\nDescription\n\n\n\n\ndo_cli\nParses axolotl config, CLI args, and calls do_train.\n\n\ndo_train\nTrains a transformers model by first loading the dataset(s) specified in the\n\n\n\n\n\ncli.train.do_cli(config=Path('examples/'), **kwargs)\nParses axolotl config, CLI args, and calls do_train.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nconfig\nUnion[Path, str]\nPath to axolotl config YAML file.\nPath('examples/')\n\n\nkwargs\n\nAdditional keyword arguments to override config file values.\n{}\n\n\n\n\n\n\n\ncli.train.do_train(cfg, cli_args)\nTrains a transformers model by first loading the dataset(s) specified in the\naxolotl config, and then calling axolotl.train.train. Also runs the plugin\nmanager’s post_train_unload once training completes.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ncli_args\nTrainerCliArgs\nTraining-specific CLI arguments.\nrequired" }, { - "objectID": "docs/api/prompt_strategies.orpo.chat_template.html#functions", - "href": "docs/api/prompt_strategies.orpo.chat_template.html#functions", - "title": "prompt_strategies.orpo.chat_template", + "objectID": "docs/api/monkeypatch.llama_attn_hijack_flash.html", + "href": "docs/api/monkeypatch.llama_attn_hijack_flash.html", + "title": "monkeypatch.llama_attn_hijack_flash", "section": "", - "text": "Name\nDescription\n\n\n\n\nload\nchatml transforms for datasets with system, input, chosen, rejected\n\n\n\n\n\nprompt_strategies.orpo.chat_template.load(tokenizer, cfg, ds_cfg=None, **kwargs)\nchatml transforms for datasets with system, input, chosen, rejected" + "text": "monkeypatch.llama_attn_hijack_flash\nFlash attention monkey patch for 
llama model\n\n\n\n\n\nName\nDescription\n\n\n\n\nFusedAttention\nFused QKV Attention layer for incrementally improved training efficiency\n\n\nLlamaDecoderLayer\npatched version of LlamaDecoderLayer to pass through the precalculated cu_seqlens\n\n\n\n\n\nmonkeypatch.llama_attn_hijack_flash.FusedAttention(config, q, k, v, o)\nFused QKV Attention layer for incrementally improved training efficiency\n\n\n\nmonkeypatch.llama_attn_hijack_flash.LlamaDecoderLayer()\npatched version of LlamaDecoderLayer to pass through the precalculated cu_seqlens\n\n\n\n\n\nName\nDescription\n\n\n\n\nforward\n\n\n\n\n\n\nmonkeypatch.llama_attn_hijack_flash.LlamaDecoderLayer.forward(\n hidden_states,\n attention_mask=None,\n position_ids=None,\n past_key_value=None,\n output_attentions=False,\n use_cache=False,\n padding_mask=None,\n cu_seqlens=None,\n max_seqlen=None,\n)\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nhidden_states\ntorch.FloatTensor\ninput to the layer of shape (batch, seq_len, embed_dim)\nrequired\n\n\nattention_mask\ntorch.FloatTensor, optional\nattention mask of size (batch, 1, tgt_len, src_len) where padding elements are indicated by very large negative values.\nNone\n\n\noutput_attentions\nbool, optional\nWhether or not to return the attentions tensors of all attention layers. 
See attentions under returned tensors for more detail.\nFalse\n\n\nuse_cache\nbool, optional\nIf set to True, past_key_values key value states are returned and can be used to speed up decoding (see past_key_values).\nFalse\n\n\npast_key_value\nTuple(torch.FloatTensor), optional\ncached past key and value projection states\nNone\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nflashattn_forward\nInput shape: Batch x Time x Channel\n\n\nflashattn_forward_with_s2attn\nInput shape: Batch x Time x Channel\n\n\ngenerate_qkv\n\n\n\n\n\n\nmonkeypatch.llama_attn_hijack_flash.flashattn_forward(\n self,\n hidden_states,\n attention_mask=None,\n position_ids=None,\n past_key_value=None,\n output_attentions=False,\n use_cache=False,\n padding_mask=None,\n cu_seqlens=None,\n max_seqlen=None,\n)\nInput shape: Batch x Time x Channel\nattention_mask: [bsz, q_len]\n\n\n\nmonkeypatch.llama_attn_hijack_flash.flashattn_forward_with_s2attn(\n self,\n hidden_states,\n attention_mask=None,\n position_ids=None,\n past_key_value=None,\n output_attentions=False,\n use_cache=False,\n padding_mask=None,\n cu_seqlens=None,\n max_seqlen=None,\n)\nInput shape: Batch x Time x Channel\nFrom: https://github.com/dvlab-research/LongLoRA/blob/main/llama_attn_replace.py\nattention_mask: [bsz, q_len]\ncu_seqlens will be ignored if provided\nmax_seqlen will be ignored if provided\n\n\n\nmonkeypatch.llama_attn_hijack_flash.generate_qkv(\n q,\n k,\n v,\n query_padding_mask=None,\n key_padding_mask=None,\n kvpacked=False,\n qkvpacked=False,\n)\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nq\n\n(batch_size, seqlen_q, nheads, d)\nrequired\n\n\nk\n\n(batch_size, seqlen_k, nheads_k, d)\nrequired\n\n\nv\n\n(batch_size, seqlen_k, nheads_k, d)\nrequired\n\n\nquery_padding_mask\n\n(batch_size, seqlen), bool\nNone\n\n\nkey_padding_mask\n\n(batch_size, seqlen), bool\nNone" }, { - "objectID": "docs/api/prompt_strategies.completion.html", - "href": "docs/api/prompt_strategies.completion.html", - 
"title": "prompt_strategies.completion", + "objectID": "docs/api/monkeypatch.llama_attn_hijack_flash.html#classes", + "href": "docs/api/monkeypatch.llama_attn_hijack_flash.html#classes", + "title": "monkeypatch.llama_attn_hijack_flash", "section": "", - "text": "prompt_strategies.completion\nBasic completion text\n\n\n\n\n\nName\nDescription\n\n\n\n\nCompletionPromptTokenizingStrategy\nTokenizing strategy for Completion prompts.\n\n\nCompletionPrompter\nPrompter for completion\n\n\n\n\n\nprompt_strategies.completion.CompletionPromptTokenizingStrategy(\n self,\n *args,\n max_length=None,\n **kwargs,\n)\nTokenizing strategy for Completion prompts.\n\n\n\nprompt_strategies.completion.CompletionPrompter()\nPrompter for completion" + "text": "Name\nDescription\n\n\n\n\nFusedAttention\nFused QKV Attention layer for incrementally improved training efficiency\n\n\nLlamaDecoderLayer\npatched version of LlamaDecoderLayer to pass through the precalculated cu_seqlens\n\n\n\n\n\nmonkeypatch.llama_attn_hijack_flash.FusedAttention(config, q, k, v, o)\nFused QKV Attention layer for incrementally improved training efficiency\n\n\n\nmonkeypatch.llama_attn_hijack_flash.LlamaDecoderLayer()\npatched version of LlamaDecoderLayer to pass through the precalculated cu_seqlens\n\n\n\n\n\nName\nDescription\n\n\n\n\nforward\n\n\n\n\n\n\nmonkeypatch.llama_attn_hijack_flash.LlamaDecoderLayer.forward(\n hidden_states,\n attention_mask=None,\n position_ids=None,\n past_key_value=None,\n output_attentions=False,\n use_cache=False,\n padding_mask=None,\n cu_seqlens=None,\n max_seqlen=None,\n)\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nhidden_states\ntorch.FloatTensor\ninput to the layer of shape (batch, seq_len, embed_dim)\nrequired\n\n\nattention_mask\ntorch.FloatTensor, optional\nattention mask of size (batch, 1, tgt_len, src_len) where padding elements are indicated by very large negative values.\nNone\n\n\noutput_attentions\nbool, optional\nWhether or not to return the 
attentions tensors of all attention layers. See attentions under returned tensors for more detail.\nFalse\n\n\nuse_cache\nbool, optional\nIf set to True, past_key_values key value states are returned and can be used to speed up decoding (see past_key_values).\nFalse\n\n\npast_key_value\nTuple(torch.FloatTensor), optional\ncached past key and value projection states\nNone" }, { - "objectID": "docs/api/prompt_strategies.completion.html#classes", - "href": "docs/api/prompt_strategies.completion.html#classes", - "title": "prompt_strategies.completion", + "objectID": "docs/api/monkeypatch.llama_attn_hijack_flash.html#functions", + "href": "docs/api/monkeypatch.llama_attn_hijack_flash.html#functions", + "title": "monkeypatch.llama_attn_hijack_flash", "section": "", - "text": "Name\nDescription\n\n\n\n\nCompletionPromptTokenizingStrategy\nTokenizing strategy for Completion prompts.\n\n\nCompletionPrompter\nPrompter for completion\n\n\n\n\n\nprompt_strategies.completion.CompletionPromptTokenizingStrategy(\n self,\n *args,\n max_length=None,\n **kwargs,\n)\nTokenizing strategy for Completion prompts.\n\n\n\nprompt_strategies.completion.CompletionPrompter()\nPrompter for completion" + "text": "Name\nDescription\n\n\n\n\nflashattn_forward\nInput shape: Batch x Time x Channel\n\n\nflashattn_forward_with_s2attn\nInput shape: Batch x Time x Channel\n\n\ngenerate_qkv\n\n\n\n\n\n\nmonkeypatch.llama_attn_hijack_flash.flashattn_forward(\n self,\n hidden_states,\n attention_mask=None,\n position_ids=None,\n past_key_value=None,\n output_attentions=False,\n use_cache=False,\n padding_mask=None,\n cu_seqlens=None,\n max_seqlen=None,\n)\nInput shape: Batch x Time x Channel\nattention_mask: [bsz, q_len]\n\n\n\nmonkeypatch.llama_attn_hijack_flash.flashattn_forward_with_s2attn(\n self,\n hidden_states,\n attention_mask=None,\n position_ids=None,\n past_key_value=None,\n output_attentions=False,\n use_cache=False,\n padding_mask=None,\n cu_seqlens=None,\n max_seqlen=None,\n)\nInput shape: 
Batch x Time x Channel\nFrom: https://github.com/dvlab-research/LongLoRA/blob/main/llama_attn_replace.py\nattention_mask: [bsz, q_len]\ncu_seqlens will be ignored if provided\nmax_seqlen will be ignored if provided\n\n\n\nmonkeypatch.llama_attn_hijack_flash.generate_qkv(\n q,\n k,\n v,\n query_padding_mask=None,\n key_padding_mask=None,\n kvpacked=False,\n qkvpacked=False,\n)\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nq\n\n(batch_size, seqlen_q, nheads, d)\nrequired\n\n\nk\n\n(batch_size, seqlen_k, nheads_k, d)\nrequired\n\n\nv\n\n(batch_size, seqlen_k, nheads_k, d)\nrequired\n\n\nquery_padding_mask\n\n(batch_size, seqlen), bool\nNone\n\n\nkey_padding_mask\n\n(batch_size, seqlen), bool\nNone" }, { - "objectID": "docs/api/evaluate.html", - "href": "docs/api/evaluate.html", - "title": "evaluate", + "objectID": "docs/api/prompt_strategies.user_defined.html", + "href": "docs/api/prompt_strategies.user_defined.html", + "title": "prompt_strategies.user_defined", "section": "", - "text": "evaluate\nModule for evaluating models.\n\n\n\n\n\nName\nDescription\n\n\n\n\nevaluate\nEvaluate a model on training and validation datasets.\n\n\nevaluate_dataset\nHelper function to evaluate a single dataset.\n\n\n\n\n\nevaluate.evaluate(cfg, dataset_meta)\nEvaluate a model on training and validation datasets.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ndataset_meta\nTrainDatasetMeta\nDataset metadata containing training and evaluation datasets.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nDict[str, float]\nDictionary mapping metric names to their values.\n\n\n\n\n\n\n\nevaluate.evaluate_dataset(trainer, dataset, dataset_type, flash_optimum=False)\nHelper function to evaluate a single dataset.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntrainer\nTrainer\nThe trainer 
instance.\nrequired\n\n\ndataset\nDataset\nDataset to evaluate.\nrequired\n\n\ndataset_type\nstr\nType of dataset (‘train’ or ‘eval’).\nrequired\n\n\nflash_optimum\nbool\nWhether to use flash optimum.\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nOptional[Dict[str, float]]\nDictionary of metrics or None if dataset is None." + "text": "prompt_strategies.user_defined\nUser Defined prompts with configuration from the YML config\n\n\n\n\n\nName\nDescription\n\n\n\n\nUserDefinedDatasetConfig\ndataclass configuration representing a userdefined dataset type\n\n\nUserDefinedPromptTokenizationStrategy\nPrompt Tokenization Strategy for user defined prompts\n\n\n\n\n\nprompt_strategies.user_defined.UserDefinedDatasetConfig(\n system_prompt='',\n field_system='system',\n field_instruction='instruction',\n field_input='input',\n field_output='output',\n format='{instruction} {input} ',\n no_input_format='{instruction} ',\n system_format='{system}',\n)\ndataclass configuration representing a userdefined dataset type\n\n\n\nprompt_strategies.user_defined.UserDefinedPromptTokenizationStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nPrompt Tokenization Strategy for user defined prompts" }, { - "objectID": "docs/api/evaluate.html#functions", - "href": "docs/api/evaluate.html#functions", - "title": "evaluate", + "objectID": "docs/api/prompt_strategies.user_defined.html#classes", + "href": "docs/api/prompt_strategies.user_defined.html#classes", + "title": "prompt_strategies.user_defined", "section": "", - "text": "Name\nDescription\n\n\n\n\nevaluate\nEvaluate a model on training and validation datasets.\n\n\nevaluate_dataset\nHelper function to evaluate a single dataset.\n\n\n\n\n\nevaluate.evaluate(cfg, dataset_meta)\nEvaluate a model on training and validation datasets.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to 
values.\nrequired\n\n\ndataset_meta\nTrainDatasetMeta\nDataset metadata containing training and evaluation datasets.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nDict[str, float]\nDictionary mapping metric names to their values.\n\n\n\n\n\n\n\nevaluate.evaluate_dataset(trainer, dataset, dataset_type, flash_optimum=False)\nHelper function to evaluate a single dataset.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntrainer\nTrainer\nThe trainer instance.\nrequired\n\n\ndataset\nDataset\nDataset to evaluate.\nrequired\n\n\ndataset_type\nstr\nType of dataset (‘train’ or ‘eval’).\nrequired\n\n\nflash_optimum\nbool\nWhether to use flash optimum.\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nOptional[Dict[str, float]]\nDictionary of metrics or None if dataset is None." + "text": "Name\nDescription\n\n\n\n\nUserDefinedDatasetConfig\ndataclass configuration representing a userdefined dataset type\n\n\nUserDefinedPromptTokenizationStrategy\nPrompt Tokenization Strategy for user defined prompts\n\n\n\n\n\nprompt_strategies.user_defined.UserDefinedDatasetConfig(\n system_prompt='',\n field_system='system',\n field_instruction='instruction',\n field_input='input',\n field_output='output',\n format='{instruction} {input} ',\n no_input_format='{instruction} ',\n system_format='{system}',\n)\ndataclass configuration representing a userdefined dataset type\n\n\n\nprompt_strategies.user_defined.UserDefinedPromptTokenizationStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nPrompt Tokenization Strategy for user defined prompts" }, { - "objectID": "docs/api/loaders.processor.html", - "href": "docs/api/loaders.processor.html", - "title": "loaders.processor", + "objectID": "docs/api/core.trainers.grpo.sampler.html", + "href": "docs/api/core.trainers.grpo.sampler.html", + "title": "core.trainers.grpo.sampler", "section": "", - "text": "loaders.processor\nloaders.processor\nProcessor 
loading functionality for multi-modal models" + "text": "core.trainers.grpo.sampler\nRepeat random sampler (similar to the one implemented in\nhttps://github.com/huggingface/trl/blob/main/trl/trainer/grpo_trainer.py) that adds\nsequence parallelism functionality; i.e., duplicating data across ranks in the same\nsequence parallel group.\n\n\n\n\n\nName\nDescription\n\n\n\n\nSequenceParallelRepeatRandomSampler\nSampler for GRPO training with sequence parallelism.\n\n\n\n\n\ncore.trainers.grpo.sampler.SequenceParallelRepeatRandomSampler(\n dataset,\n mini_repeat_count,\n world_size,\n rank,\n batch_size=1,\n repeat_count=1,\n sequence_parallel_degree=1,\n shuffle=True,\n seed=0,\n drop_last=False,\n)\nSampler for GRPO training with sequence parallelism.\nThis sampler ensures:\n- Ranks in the same sequence parallel (SP) group receive identical data.\n- Each index is repeated multiple times for sampling different completions.\n- Entire batches are repeated for reuse in multiple updates.\n- Data is properly distributed across SP groups.\nIn the table below, the values represent dataset indices. Each SP group has\nsequence_parallel_degree = 2 GPUs working together on the same data. 
There are 2\nSP groups (SP0 and SP1), with world_size = 4 total GPUs.\n Sequence Parallel Groups\n | SP0 | SP1 |\n | GPU 0 | GPU 1 | GPU 2 | GPU 3 |\n global_step step <---> mini_repeat_count=3\n <----------> batch_size=2 per SP group\ngrad_accum=2 ▲ ▲ 0 0 [0 0 0 1 1 1] [2 2 2 3 3 3] <- SP groups get different data\n▼ | 0 1 [0 0 0 1 1 1] [2 2 2 3 3 3] <- Same data for each SP group GPU\n|\n| 1 2 [0 0 0 1 1 1] [2 2 2 3 3 3] <- Repeat same indices for iterations\nnum_iterations=2 ▼ 1 3 [0 0 0 1 1 1] [2 2 2 3 3 3] <- When using gradient accumulation\n 2 4 [4 4 4 5 5 5] [6 6 6 7 7 7] <- New batch of data indices\n 2 5 [4 4 4 5 5 5] [6 6 6 7 7 7]\n ...\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ndataset\nSized\nDataset to sample from.\nrequired\n\n\nmini_repeat_count\nint\nHow many times to repeat each sample immediately.\nrequired\n\n\nworld_size\nint\nTotal number of processes.\nrequired\n\n\nrank\nint\nRank of current process.\nrequired\n\n\nbatch_size\nint\nNumber of samples per batch.\n1\n\n\nrepeat_count\nint\nHow many times to repeat the full sampling process.\n1\n\n\nsequence_parallel_degree\nint\nNumber of ranks in a sequence parallel group.\n1\n\n\nshuffle\nbool\nWhether to shuffle the dataset.\nTrue\n\n\nseed\nint\nRandom seed for shuffling.\n0\n\n\ndrop_last\nbool\nWhether to drop the last incomplete batch.\nFalse\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nset_epoch\nSets the epoch for this sampler.\n\n\n\n\n\ncore.trainers.grpo.sampler.SequenceParallelRepeatRandomSampler.set_epoch(epoch)\nSets the epoch for this sampler.\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nepoch\nint\nEpoch number to use for shuffling.\nrequired" }, { - "objectID": "docs/api/common.const.html", - "href": "docs/api/common.const.html", - "title": "common.const", + "objectID": "docs/api/core.trainers.grpo.sampler.html#classes", + "href": "docs/api/core.trainers.grpo.sampler.html#classes", + "title": "core.trainers.grpo.sampler", "section": "", - "text": 
"common.const\ncommon.const\nVarious shared constants" + "text": "Name\nDescription\n\n\n\n\nSequenceParallelRepeatRandomSampler\nSampler for GRPO training with sequence parallelism.\n\n\n\n\n\ncore.trainers.grpo.sampler.SequenceParallelRepeatRandomSampler(\n dataset,\n mini_repeat_count,\n world_size,\n rank,\n batch_size=1,\n repeat_count=1,\n sequence_parallel_degree=1,\n shuffle=True,\n seed=0,\n drop_last=False,\n)\nSampler for GRPO training with sequence parallelism.\nThis sampler ensures:\n- Ranks in the same sequence parallel (SP) group receive identical data.\n- Each index is repeated multiple times for sampling different completions.\n- Entire batches are repeated for reuse in multiple updates.\n- Data is properly distributed across SP groups.\nIn the table below, the values represent dataset indices. Each SP group has\nsequence_parallel_degree = 2 GPUs working together on the same data. There are 2\nSP groups (SP0 and SP1), with world_size = 4 total GPUs.\n Sequence Parallel Groups\n | SP0 | SP1 |\n | GPU 0 | GPU 1 | GPU 2 | GPU 3 |\n global_step step <---> mini_repeat_count=3\n <----------> batch_size=2 per SP group\ngrad_accum=2 ▲ ▲ 0 0 [0 0 0 1 1 1] [2 2 2 3 3 3] <- SP groups get different data\n▼ | 0 1 [0 0 0 1 1 1] [2 2 2 3 3 3] <- Same data for each SP group GPU\n|\n| 1 2 [0 0 0 1 1 1] [2 2 2 3 3 3] <- Repeat same indices for iterations\nnum_iterations=2 ▼ 1 3 [0 0 0 1 1 1] [2 2 2 3 3 3] <- When using gradient accumulation\n 2 4 [4 4 4 5 5 5] [6 6 6 7 7 7] <- New batch of data indices\n 2 5 [4 4 4 5 5 5] [6 6 6 7 7 7]\n ...\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ndataset\nSized\nDataset to sample from.\nrequired\n\n\nmini_repeat_count\nint\nHow many times to repeat each sample immediately.\nrequired\n\n\nworld_size\nint\nTotal number of processes.\nrequired\n\n\nrank\nint\nRank of current process.\nrequired\n\n\nbatch_size\nint\nNumber of samples per batch.\n1\n\n\nrepeat_count\nint\nHow many times to repeat the full 
sampling process.\n1\n\n\nsequence_parallel_degree\nint\nNumber of ranks in a sequence parallel group.\n1\n\n\nshuffle\nbool\nWhether to shuffle the dataset.\nTrue\n\n\nseed\nint\nRandom seed for shuffling.\n0\n\n\ndrop_last\nbool\nWhether to drop the last incomplete batch.\nFalse\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nset_epoch\nSets the epoch for this sampler.\n\n\n\n\n\ncore.trainers.grpo.sampler.SequenceParallelRepeatRandomSampler.set_epoch(epoch)\nSets the epoch for this sampler.\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nepoch\nint\nEpoch number to use for shuffling.\nrequired" }, { - "objectID": "docs/api/prompt_strategies.bradley_terry.llama3.html", - "href": "docs/api/prompt_strategies.bradley_terry.llama3.html", - "title": "prompt_strategies.bradley_terry.llama3", + "objectID": "docs/api/utils.ctx_managers.sequence_parallel.html", + "href": "docs/api/utils.ctx_managers.sequence_parallel.html", + "title": "utils.ctx_managers.sequence_parallel", "section": "", - "text": "prompt_strategies.bradley_terry.llama3\nchatml transforms for datasets with system, input, chosen, rejected to match llama3 chat template\n\n\n\n\n\nName\nDescription\n\n\n\n\nicr\nchatml transforms for datasets with system, input, chosen, rejected\n\n\n\n\n\nprompt_strategies.bradley_terry.llama3.icr(cfg, **kwargs)\nchatml transforms for datasets with system, input, chosen, rejected\nex. 
https://huggingface.co/datasets/argilla/distilabel-intel-orca-dpo-pairs" + "text": "utils.ctx_managers.sequence_parallel\nModule for Axolotl trainer sequence parallelism manager and utilities\n\n\n\n\n\nName\nDescription\n\n\n\n\nAllGatherWithGrad\nCustom autograd function for all-gather to preserve gradients.\n\n\nSequenceParallelContextManager\nContext manager for sequence parallelism operations.\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.AllGatherWithGrad()\nCustom autograd function for all-gather to preserve gradients.\n\n\n\n\n\nName\nDescription\n\n\n\n\nbackward\nBackward pass for all-gather operation.\n\n\nforward\nForward pass of all-gather of data with sequence dimension.\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.AllGatherWithGrad.backward(\n ctx,\n grad_output,\n)\nBackward pass for all-gather operation.\nExtracts the gradient slice corresponding to this rank’s original input\nfrom the full gradient tensor.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nctx\ntorch.autograd.function.FunctionCtx\ntorch.autograd function context.\nrequired\n\n\ngrad_output\ntorch.Tensor\nGradient from subsequent layers with respect to the concatenated output tensor.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[torch.Tensor, None]\nTuple containing the gradient slice for this rank’s input tensor and None for the process group parameter which doesn’t require gradients.\n\n\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.AllGatherWithGrad.forward(\n ctx,\n input_tensor,\n group,\n)\nForward pass of all-gather of data with sequence dimension.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nctx\ntorch.autograd.function.FunctionCtx\ntorch.autograd function context.\nrequired\n\n\ninput_tensor\ntorch.Tensor\nTensor from model output with sequence dimension.\nrequired\n\n\ngroup\ndist.ProcessGroup\ntorch.distributed process 
group.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntorch.Tensor\nTensor from gathering the input_tensor from across the process group and concatenating along the sequence dimension.\n\n\n\n\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.SequenceParallelContextManager(\n models,\n sequence_parallel_degree,\n gradient_accumulation_steps,\n ring_attn_func,\n heads_k_stride,\n)\nContext manager for sequence parallelism operations.\nThis class provides a context that will automatically apply sequence parallelism\nduring model forward passes using a pre-forward hook, and gather outputs from\nacross the sequence parallelism group using a post-forward hook.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nmodels\nlist[nn.Module]\nList of models to apply sequence parallelism to pre- and post- forward hooks.\nrequired\n\n\nsequence_parallel_degree\nint\nNumber of processes to split sequences over.\nrequired\n\n\ngradient_accumulation_steps\nint\nNumber of steps to accumulate gradients over.\nrequired\n\n\nring_attn_func\nRingAttnFunc\nWhich ring attention function to use. Currently unused.\nrequired\n\n\nheads_k_stride\nint | None\nSequence parallelism K head stride size. 
Passed through to varlen_llama3 ring_flash_attn implementation.\nrequired\n\n\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\napply_sequence_parallelism\nApply sequence parallelism slicing to a batch.\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.apply_sequence_parallelism(\n batch,\n local_rank,\n local_world_size,\n gradient_accumulation_steps,\n ring_attn_func,\n)\nApply sequence parallelism slicing to a batch.\nSpecial handling is implemented for integer logits_to_keep, which indicates\nto only keep the last N tokens in the sequence during generation.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nbatch\ndict[str, torch.Tensor]\nBatch dictionary (e.g., input_ids, attention_mask, etc.).\nrequired\n\n\nlocal_rank\nint\nLocal rank in the sequence parallel group.\nrequired\n\n\nlocal_world_size\nint\nWorld size of the sequence parallel group.\nrequired\n\n\ngradient_accumulation_steps\nint\nNumber of steps to accumulate gradients over.\nrequired\n\n\nring_attn_func\nRingAttnFunc\nWhich ring attention function to use. Currently unused, but related to above TODO.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[dict[str, torch.Tensor], int, int]\ntuple of: - Batch dictionary with sliced tensors. - The original sequence length before padding. - The number of padding tokens added." 
}, { - "objectID": "docs/api/prompt_strategies.bradley_terry.llama3.html#functions", - "href": "docs/api/prompt_strategies.bradley_terry.llama3.html#functions", - "title": "prompt_strategies.bradley_terry.llama3", + "objectID": "docs/api/utils.ctx_managers.sequence_parallel.html#classes", + "href": "docs/api/utils.ctx_managers.sequence_parallel.html#classes", + "title": "utils.ctx_managers.sequence_parallel", "section": "", - "text": "Name\nDescription\n\n\n\n\nicr\nchatml transforms for datasets with system, input, chosen, rejected\n\n\n\n\n\nprompt_strategies.bradley_terry.llama3.icr(cfg, **kwargs)\nchatml transforms for datasets with system, input, chosen, rejected\nex. https://huggingface.co/datasets/argilla/distilabel-intel-orca-dpo-pairs" + "text": "Name\nDescription\n\n\n\n\nAllGatherWithGrad\nCustom autograd function for all-gather to preserve gradients.\n\n\nSequenceParallelContextManager\nContext manager for sequence parallelism operations.\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.AllGatherWithGrad()\nCustom autograd function for all-gather to preserve gradients.\n\n\n\n\n\nName\nDescription\n\n\n\n\nbackward\nBackward pass for all-gather operation.\n\n\nforward\nForward pass of all-gather of data with sequence dimension.\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.AllGatherWithGrad.backward(\n ctx,\n grad_output,\n)\nBackward pass for all-gather operation.\nExtracts the gradient slice corresponding to this rank’s original input\nfrom the full gradient tensor.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nctx\ntorch.autograd.function.FunctionCtx\ntorch.autograd function context.\nrequired\n\n\ngrad_output\ntorch.Tensor\nGradient from subsequent layers with respect to the concatenated output tensor.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[torch.Tensor, None]\nTuple containing the gradient slice for this rank’s input tensor and None for the process group parameter which doesn’t require 
gradients.\n\n\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.AllGatherWithGrad.forward(\n ctx,\n input_tensor,\n group,\n)\nForward pass of all-gather of data with sequence dimension.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nctx\ntorch.autograd.function.FunctionCtx\ntorch.autograd function context.\nrequired\n\n\ninput_tensor\ntorch.Tensor\nTensor from model output with sequence dimension.\nrequired\n\n\ngroup\ndist.ProcessGroup\ntorch.distributed process group.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntorch.Tensor\nTensor from gathering the input_tensor from across the process group and concatenating along the sequence dimension.\n\n\n\n\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.SequenceParallelContextManager(\n models,\n sequence_parallel_degree,\n gradient_accumulation_steps,\n ring_attn_func,\n heads_k_stride,\n)\nContext manager for sequence parallelism operations.\nThis class provides a context that will automatically apply sequence parallelism\nduring model forward passes using a pre-forward hook, and gather outputs from\nacross the sequence parallelism group using a post-forward hook.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nmodels\nlist[nn.Module]\nList of models to apply sequence parallelism to pre- and post- forward hooks.\nrequired\n\n\nsequence_parallel_degree\nint\nNumber of processes to split sequences over.\nrequired\n\n\ngradient_accumulation_steps\nint\nNumber of steps to accumulate gradients over.\nrequired\n\n\nring_attn_func\nRingAttnFunc\nWhich ring attention function to use. Currently unused.\nrequired\n\n\nheads_k_stride\nint | None\nSequence parallelism K head stride size. 
Passed through to varlen_llama3 ring_flash_attn implementation.\nrequired" }, { - "objectID": "docs/api/utils.distributed.html", - "href": "docs/api/utils.distributed.html", - "title": "utils.distributed", + "objectID": "docs/api/utils.ctx_managers.sequence_parallel.html#functions", + "href": "docs/api/utils.ctx_managers.sequence_parallel.html#functions", + "title": "utils.ctx_managers.sequence_parallel", "section": "", - "text": "utils.distributed\nutility helpers for distributed checks\n\n\n\n\n\nName\nDescription\n\n\n\n\nbarrier\nActs as a barrier to wait for all processes. This ensures that all processes\n\n\ncleanup_distributed\nDestroy process group if torch distributed is initialized. Called in training early\n\n\ncompute_and_broadcast\nCompute a value using the function ‘fn’ only on the specified rank (default is 0).\n\n\ngather_from_all_ranks\nRun a callable ‘fn’ on all ranks and gather the results on the specified rank.\n\n\ngather_scalar_from_all_ranks\nRun a callable ‘fn’ on all ranks and gather the results on the specified rank.\n\n\nis_distributed\nCheck if distributed training is initialized.\n\n\nis_main_process\nCheck if the current process is the main process. If not in distributed mode,\n\n\nreduce_and_broadcast\nRun a callable ‘fn1’ on all ranks, gather the results, reduce them using ‘fn2’,\n\n\nzero_first\nruns the wrapped context so that rank 0 runs first before other ranks\n\n\n\n\n\nutils.distributed.barrier()\nActs as a barrier to wait for all processes. This ensures that all processes\nreach the barrier before proceeding further.\n\n\n\nutils.distributed.cleanup_distributed()\nDestroy process group if torch distributed is initialized. 
Called in training early\ntermination or when training successfully completes.\n\n\n\nutils.distributed.compute_and_broadcast(fn)\nCompute a value using the function ‘fn’ only on the specified rank (default is 0).\nThe value is then broadcasted to all other ranks.\nArgs:\n- fn (callable): A function that computes the value. This should not have any side effects.\n- rank (int, optional): The rank that computes the value. Default is 0.\nReturns:\n- The computed value (int or float).\n\n\n\nutils.distributed.gather_from_all_ranks(fn, world_size=1)\nRun a callable ‘fn’ on all ranks and gather the results on the specified rank.\nArgs:\n- fn (callable): A function that computes the value. This should not have any side effects.\n- rank (int, optional): The rank that gathers the values. Default is 0.\n- world_size (int, optional): Total number of processes in the current distributed setup.\nReturns:\n- A list of computed values from all ranks if on the gathering rank, otherwise None.\n\n\n\nutils.distributed.gather_scalar_from_all_ranks(fn, world_size=1)\nRun a callable ‘fn’ on all ranks and gather the results on the specified rank.\nArgs:\n- fn (callable): A function that computes the value. This should not have any side effects.\n- rank (int, optional): The rank that gathers the values. Default is 0.\n- world_size (int, optional): Total number of processes in the current distributed setup.\nReturns:\n- A list of computed values from all ranks if on the gathering rank, otherwise None.\n\n\n\nutils.distributed.is_distributed()\nCheck if distributed training is initialized.\n\n\n\nutils.distributed.is_main_process(use_environ=False)\nCheck if the current process is the main process. 
If not in distributed mode,\nalways return True.\nArgs:\n- use_environ (bool, optional): Use environment variable to determine main process.\nReturns:\n- bool: True if the current process is the main process, False otherwise.\n\n\n\nutils.distributed.reduce_and_broadcast(fn1, fn2)\nRun a callable ‘fn1’ on all ranks, gather the results, reduce them using ‘fn2’,\nand then broadcast the reduced result to all ranks.\nArgs:\n- fn1 (callable): A function that computes the value on each rank.\n- fn2 (callable): A reduction function that takes a list of values and returns a single value.\n- world_size (int, optional): Total number of processes in the current distributed setup.\nReturns:\n- The reduced and broadcasted value.\n\n\n\nutils.distributed.zero_first(is_main)\nruns the wrapped context so that rank 0 runs first before other ranks" + "text": "Name\nDescription\n\n\n\n\napply_sequence_parallelism\nApply sequence parallelism slicing to a batch.\n\n\n\n\n\nutils.ctx_managers.sequence_parallel.apply_sequence_parallelism(\n batch,\n local_rank,\n local_world_size,\n gradient_accumulation_steps,\n ring_attn_func,\n)\nApply sequence parallelism slicing to a batch.\nSpecial handling is implemented for integer logits_to_keep, which indicates\nto only keep the last N tokens in the sequence during generation.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nbatch\ndict[str, torch.Tensor]\nBatch dictionary (e.g., input_ids, attention_mask, etc.).\nrequired\n\n\nlocal_rank\nint\nLocal rank in the sequence parallel group.\nrequired\n\n\nlocal_world_size\nint\nWorld size of the sequence parallel group.\nrequired\n\n\ngradient_accumulation_steps\nint\nNumber of steps to accumulate gradients over.\nrequired\n\n\nring_attn_func\nRingAttnFunc\nWhich ring attention function to use. 
Currently unused, but related to above TODO.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[dict[str, torch.Tensor], int, int]\ntuple of: - Batch dictionary with sliced tensors. - The original sequence length before padding. - The number of padding tokens added." }, { - "objectID": "docs/api/utils.distributed.html#functions", - "href": "docs/api/utils.distributed.html#functions", - "title": "utils.distributed", + "objectID": "docs/api/monkeypatch.data.batch_dataset_fetcher.html", + "href": "docs/api/monkeypatch.data.batch_dataset_fetcher.html", + "title": "monkeypatch.data.batch_dataset_fetcher", "section": "", - "text": "Name\nDescription\n\n\n\n\nbarrier\nActs as a barrier to wait for all processes. This ensures that all processes\n\n\ncleanup_distributed\nDestroy process group if torch distributed is initialized. Called in training early\n\n\ncompute_and_broadcast\nCompute a value using the function ‘fn’ only on the specified rank (default is 0).\n\n\ngather_from_all_ranks\nRun a callable ‘fn’ on all ranks and gather the results on the specified rank.\n\n\ngather_scalar_from_all_ranks\nRun a callable ‘fn’ on all ranks and gather the results on the specified rank.\n\n\nis_distributed\nCheck if distributed training is initialized.\n\n\nis_main_process\nCheck if the current process is the main process. If not in distributed mode,\n\n\nreduce_and_broadcast\nRun a callable ‘fn1’ on all ranks, gather the results, reduce them using ‘fn2’,\n\n\nzero_first\nruns the wrapped context so that rank 0 runs first before other ranks\n\n\n\n\n\nutils.distributed.barrier()\nActs as a barrier to wait for all processes. This ensures that all processes\nreach the barrier before proceeding further.\n\n\n\nutils.distributed.cleanup_distributed()\nDestroy process group if torch distributed is initialized. 
Called in training early\ntermination or when training successfully completes.\n\n\n\nutils.distributed.compute_and_broadcast(fn)\nCompute a value using the function ‘fn’ only on the specified rank (default is 0).\nThe value is then broadcasted to all other ranks.\nArgs:\n- fn (callable): A function that computes the value. This should not have any side effects.\n- rank (int, optional): The rank that computes the value. Default is 0.\nReturns:\n- The computed value (int or float).\n\n\n\nutils.distributed.gather_from_all_ranks(fn, world_size=1)\nRun a callable ‘fn’ on all ranks and gather the results on the specified rank.\nArgs:\n- fn (callable): A function that computes the value. This should not have any side effects.\n- rank (int, optional): The rank that gathers the values. Default is 0.\n- world_size (int, optional): Total number of processes in the current distributed setup.\nReturns:\n- A list of computed values from all ranks if on the gathering rank, otherwise None.\n\n\n\nutils.distributed.gather_scalar_from_all_ranks(fn, world_size=1)\nRun a callable ‘fn’ on all ranks and gather the results on the specified rank.\nArgs:\n- fn (callable): A function that computes the value. This should not have any side effects.\n- rank (int, optional): The rank that gathers the values. Default is 0.\n- world_size (int, optional): Total number of processes in the current distributed setup.\nReturns:\n- A list of computed values from all ranks if on the gathering rank, otherwise None.\n\n\n\nutils.distributed.is_distributed()\nCheck if distributed training is initialized.\n\n\n\nutils.distributed.is_main_process(use_environ=False)\nCheck if the current process is the main process. 
If not in distributed mode,\nalways return True.\nArgs:\n- use_environ (bool, optional): Use environment variable to determine main process.\nReturns:\n- bool: True if the current process is the main process, False otherwise.\n\n\n\nutils.distributed.reduce_and_broadcast(fn1, fn2)\nRun a callable ‘fn1’ on all ranks, gather the results, reduce them using ‘fn2’,\nand then broadcast the reduced result to all ranks.\nArgs:\n- fn1 (callable): A function that computes the value on each rank.\n- fn2 (callable): A reduction function that takes a list of values and returns a single value.\n- world_size (int, optional): Total number of processes in the current distributed setup.\nReturns:\n- The reduced and broadcasted value.\n\n\n\nutils.distributed.zero_first(is_main)\nruns the wrapped context so that rank 0 runs first before other ranks" + "text": "monkeypatch.data.batch_dataset_fetcher\nmonkeypatch.data.batch_dataset_fetcher\nmonkey patches for the dataset fetcher to handle batches of packed indexes" }, { - "objectID": "docs/api/utils.tokenization.html", - "href": "docs/api/utils.tokenization.html", - "title": "utils.tokenization", + "objectID": "docs/api/core.trainers.utils.html", + "href": "docs/api/core.trainers.utils.html", + "title": "core.trainers.utils", "section": "", - "text": "utils.tokenization\nModule for tokenization utilities\n\n\n\n\n\nName\nDescription\n\n\n\n\ncolor_token_for_rl_debug\nHelper function to color tokens based on their type.\n\n\nprocess_tokens_for_rl_debug\nHelper function to process and color tokens.\n\n\n\n\n\nutils.tokenization.color_token_for_rl_debug(\n decoded_token,\n encoded_token,\n color,\n text_only,\n)\nHelper function to color tokens based on their type.\n\n\n\nutils.tokenization.process_tokens_for_rl_debug(\n tokens,\n color,\n tokenizer,\n text_only,\n)\nHelper function to process and color tokens." 
+ "text": "core.trainers.utils\ncore.trainers.utils\nUtils for Axolotl trainers" }, { - "objectID": "docs/api/utils.tokenization.html#functions", - "href": "docs/api/utils.tokenization.html#functions", - "title": "utils.tokenization", + "objectID": "docs/api/kernels.quantize.html", + "href": "docs/api/kernels.quantize.html", + "title": "kernels.quantize", "section": "", - "text": "Name\nDescription\n\n\n\n\ncolor_token_for_rl_debug\nHelper function to color tokens based on their type.\n\n\nprocess_tokens_for_rl_debug\nHelper function to process and color tokens.\n\n\n\n\n\nutils.tokenization.color_token_for_rl_debug(\n decoded_token,\n encoded_token,\n color,\n text_only,\n)\nHelper function to color tokens based on their type.\n\n\n\nutils.tokenization.process_tokens_for_rl_debug(\n tokens,\n color,\n tokenizer,\n text_only,\n)\nHelper function to process and color tokens." + "text": "kernels.quantize\nDequantization utilities for bitsandbytes integration.\n\n\n\n\n\nName\nDescription\n\n\n\n\ndequantize\nFast NF4 dequantization using bitsandbytes CUDA kernels.\n\n\n\n\n\nkernels.quantize.dequantize(W, quant_state=None, out=None)\nFast NF4 dequantization using bitsandbytes CUDA kernels.\nPerforms efficient dequantization of weights from NF4 format using bitsandbytes’\noptimized CUDA implementations. Supports both legacy list and new QuantState\nformats.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nW\ntorch.Tensor\nQuantized weight tensor to dequantize\nrequired\n\n\nquant_state\nQuantState | list | None\nQuantization state containing metadata needed for dequantization. Can be either a QuantState object or legacy list format. If None, returns W unchanged.\nNone\n\n\nout\ntorch.Tensor | None\nOptional output tensor for storing dequantized results. Must match expected shape and dtype if provided.\nNone\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntorch.Tensor\nDequantized tensor in the specified dtype (fp16 or bf16). 
Will be transposed if\n\n\n\ntorch.Tensor\ninput W was transposed.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nAssertionError\nIf provided output tensor doesn’t match expected shape / dtype.\n\n\n\n\n\n\nUses CUDA streams for better performance when available in newer bitsandbytes\nversions (>0.43.3)." }, { - "objectID": "docs/api/utils.schemas.integrations.html", - "href": "docs/api/utils.schemas.integrations.html", - "title": "utils.schemas.integrations", + "objectID": "docs/api/kernels.quantize.html#functions", + "href": "docs/api/kernels.quantize.html#functions", + "title": "kernels.quantize", "section": "", - "text": "utils.schemas.integrations\nPydantic models for Axolotl integrations\n\n\n\n\n\nName\nDescription\n\n\n\n\nCometConfig\nComet configuration subset\n\n\nGradioConfig\nGradio configuration subset\n\n\nLISAConfig\nLISA configuration subset\n\n\nMLFlowConfig\nMLFlow configuration subset\n\n\nRayConfig\nRay launcher configuration subset\n\n\nWandbConfig\nWandb configuration subset\n\n\n\n\n\nutils.schemas.integrations.CometConfig()\nComet configuration subset\n\n\n\nutils.schemas.integrations.GradioConfig()\nGradio configuration subset\n\n\n\nutils.schemas.integrations.LISAConfig()\nLISA configuration subset\n\n\n\nutils.schemas.integrations.MLFlowConfig()\nMLFlow configuration subset\n\n\n\nutils.schemas.integrations.RayConfig()\nRay launcher configuration subset\n\n\n\nutils.schemas.integrations.WandbConfig()\nWandb configuration subset" + "text": "Name\nDescription\n\n\n\n\ndequantize\nFast NF4 dequantization using bitsandbytes CUDA kernels.\n\n\n\n\n\nkernels.quantize.dequantize(W, quant_state=None, out=None)\nFast NF4 dequantization using bitsandbytes CUDA kernels.\nPerforms efficient dequantization of weights from NF4 format using bitsandbytes’\noptimized CUDA implementations. 
Supports both legacy list and new QuantState\nformats.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nW\ntorch.Tensor\nQuantized weight tensor to dequantize\nrequired\n\n\nquant_state\nQuantState | list | None\nQuantization state containing metadata needed for dequantization. Can be either a QuantState object or legacy list format. If None, returns W unchanged.\nNone\n\n\nout\ntorch.Tensor | None\nOptional output tensor for storing dequantized results. Must match expected shape and dtype if provided.\nNone\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntorch.Tensor\nDequantized tensor in the specified dtype (fp16 or bf16). Will be transposed if\n\n\n\ntorch.Tensor\ninput W was transposed.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nAssertionError\nIf provided output tensor doesn’t match expected shape / dtype.\n\n\n\n\n\n\nUses CUDA streams for better performance when available in newer bitsandbytes\nversions (>0.43.3)." }, { - "objectID": "docs/api/utils.schemas.integrations.html#classes", - "href": "docs/api/utils.schemas.integrations.html#classes", - "title": "utils.schemas.integrations", + "objectID": "docs/api/utils.dict.html", + "href": "docs/api/utils.dict.html", + "title": "utils.dict", "section": "", - "text": "Name\nDescription\n\n\n\n\nCometConfig\nComet configuration subset\n\n\nGradioConfig\nGradio configuration subset\n\n\nLISAConfig\nLISA configuration subset\n\n\nMLFlowConfig\nMLFlow configuration subset\n\n\nRayConfig\nRay launcher configuration subset\n\n\nWandbConfig\nWandb configuration subset\n\n\n\n\n\nutils.schemas.integrations.CometConfig()\nComet configuration subset\n\n\n\nutils.schemas.integrations.GradioConfig()\nGradio configuration subset\n\n\n\nutils.schemas.integrations.LISAConfig()\nLISA configuration subset\n\n\n\nutils.schemas.integrations.MLFlowConfig()\nMLFlow configuration subset\n\n\n\nutils.schemas.integrations.RayConfig()\nRay launcher configuration 
subset\n\n\n\nutils.schemas.integrations.WandbConfig()\nWandb configuration subset" + "text": "utils.dict\nModule containing the DictDefault class\n\n\n\n\n\nName\nDescription\n\n\n\n\nDictDefault\nA Dict that returns None instead of returning empty Dict for missing keys.\n\n\n\n\n\nutils.dict.DictDefault()\nA Dict that returns None instead of returning empty Dict for missing keys." }, { - "objectID": "docs/api/utils.schedulers.html", - "href": "docs/api/utils.schedulers.html", - "title": "utils.schedulers", + "objectID": "docs/api/utils.dict.html#classes", + "href": "docs/api/utils.dict.html#classes", + "title": "utils.dict", "section": "", - "text": "utils.schedulers\nModule for custom LRScheduler class\n\n\n\n\n\nName\nDescription\n\n\n\n\nInterpolatingLogScheduler\nA scheduler that interpolates learning rates in a logarithmic fashion\n\n\nRexLR\nReflected Exponential (REX) learning rate scheduler.\n\n\n\n\n\nutils.schedulers.InterpolatingLogScheduler(\n self,\n optimizer,\n num_steps,\n min_lr,\n max_lr,\n last_epoch=-1,\n)\nA scheduler that interpolates learning rates in a logarithmic fashion\n\n\n\nutils.schedulers.RexLR(\n self,\n optimizer,\n max_lr,\n min_lr,\n total_steps=0,\n num_warmup_steps=0,\n last_step=0,\n)\nReflected Exponential (REX) learning rate scheduler.\n\nOriginal implementation: https://github.com/IvanVassi/REX_LR\nOriginal license: Apache 2.0\nBased on: https://arxiv.org/abs/2107.04197\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\noptimizer\ntorch.optim.Optimizer\nThe optimizer to schedule the learning rate for.\nrequired\n\n\nmax_lr\nfloat\nThe maximum learning rate.\nrequired\n\n\nmin_lr\nfloat\nThe minimum learning rate.\nrequired\n\n\ntotal_steps\nint\nThe total number of training steps.\n0\n\n\nnum_warmup_steps\nint\nThe number of warmup steps.\n0\n\n\nlast_step\nint\nThe index of last 
step.\n0\n\n\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_cosine_schedule_with_min_lr\n\n\n\nget_cosine_schedule_with_quadratic_warmup\nCreate a schedule with a learning rate that decreases following the values of the cosine function between the\n\n\nget_cosine_schedule_with_warmup_decay_constant\nImplementation of Continual Pre-Training of Large Language Models: How to (re)warm your model? (https://arxiv.org/pdf/2308.04014.pdf)\n\n\n\n\n\nutils.schedulers.get_cosine_schedule_with_min_lr(\n optimizer,\n num_warmup_steps,\n num_training_steps,\n min_lr_ratio=0.0,\n)\n\n\n\nlinear warmup from 0 -> max_lr over num_warmup_steps\ncosine learning rate annealing from max_lr -> min_lr over num_training_steps\n\n\n\n\n\nutils.schedulers.get_cosine_schedule_with_quadratic_warmup(\n optimizer,\n num_warmup_steps,\n num_training_steps,\n num_cycles=0.5,\n last_epoch=-1,\n)\nCreate a schedule with a learning rate that decreases following the values of the cosine function between the\ninitial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the\ninitial lr set in the optimizer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\noptimizer\n[~torch.optim.Optimizer]\nThe optimizer for which to schedule the learning rate.\nrequired\n\n\nnum_warmup_steps\nint\nThe number of steps for the warmup phase.\nrequired\n\n\nnum_training_steps\nint\nThe total number of training steps.\nrequired\n\n\nnum_cycles\nfloat, optional, defaults to 0.5\nThe number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine).\n0.5\n\n\nlast_epoch\nint, optional, defaults to -1\nThe index of the last epoch when resuming training.\n-1\n\n\n\n\n\n\ntorch.optim.lr_scheduler.LambdaLR with the appropriate schedule.\n\n\n\n\nutils.schedulers.get_cosine_schedule_with_warmup_decay_constant(\n optimizer,\n num_warmup_steps,\n num_training_steps,\n constant_lr_ratio,\n min_lr_ratio,\n 
num_cycles=0.5,\n last_epoch=-1,\n)\nImplementation of Continual Pre-Training of Large Language Models: How to (re)warm your model? (https://arxiv.org/pdf/2308.04014.pdf)\nCreate a schedule with a learning rate that decreases following the values of the cosine function between the\ninitial lr set in the optimizer to min_lr_ratio until num_training_steps * constant_lr_ratio, after constant_rate returns constant value of min_rate\n, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\noptimizer\n[~torch.optim.Optimizer]\nThe optimizer for which to schedule the learning rate.\nrequired\n\n\nnum_warmup_steps\nint\nThe number of steps for the warmup phase.\nrequired\n\n\nnum_training_steps\nint\nThe total number of training steps.\nrequired\n\n\nconstant_lr_ratio\nfloat\n(float): The ratio of num_training_steps to decrease by cosine function.\nrequired\n\n\nmin_lr_ratio\nfloat\n(float): The ratio of maximum learning rate for cosine function to decay to minimum learning rate. | _required_ | | num_cycles |float, *optional*, defaults to 0.5 | The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine). |0.5| | last_epoch |int, *optional*, defaults to -1 | The index of the last epoch when resuming training. |-1`\n\n\n\n\n\n\n\ntorch.optim.lr_scheduler.LambdaLR with the appropriate schedule." + "text": "Name\nDescription\n\n\n\n\nDictDefault\nA Dict that returns None instead of returning empty Dict for missing keys.\n\n\n\n\n\nutils.dict.DictDefault()\nA Dict that returns None instead of returning empty Dict for missing keys." 
}, { - "objectID": "docs/api/utils.schedulers.html#classes", - "href": "docs/api/utils.schedulers.html#classes", - "title": "utils.schedulers", + "objectID": "docs/api/utils.trainer.html", + "href": "docs/api/utils.trainer.html", + "title": "utils.trainer", "section": "", - "text": "Name\nDescription\n\n\n\n\nInterpolatingLogScheduler\nA scheduler that interpolates learning rates in a logarithmic fashion\n\n\nRexLR\nReflected Exponential (REX) learning rate scheduler.\n\n\n\n\n\nutils.schedulers.InterpolatingLogScheduler(\n self,\n optimizer,\n num_steps,\n min_lr,\n max_lr,\n last_epoch=-1,\n)\nA scheduler that interpolates learning rates in a logarithmic fashion\n\n\n\nutils.schedulers.RexLR(\n self,\n optimizer,\n max_lr,\n min_lr,\n total_steps=0,\n num_warmup_steps=0,\n last_step=0,\n)\nReflected Exponential (REX) learning rate scheduler.\n\nOriginal implementation: https://github.com/IvanVassi/REX_LR\nOriginal license: Apache 2.0\nBased on: https://arxiv.org/abs/2107.04197\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\noptimizer\ntorch.optim.Optimizer\nThe optimizer to schedule the learning rate for.\nrequired\n\n\nmax_lr\nfloat\nThe maximum learning rate.\nrequired\n\n\nmin_lr\nfloat\nThe minimum learning rate.\nrequired\n\n\ntotal_steps\nint\nThe total number of training steps.\n0\n\n\nnum_warmup_steps\nint\nThe number of warmup steps.\n0\n\n\nlast_step\nint\nThe index of last step.\n0" + "text": "utils.trainer\nModule containing the Trainer class and related functions\n\n\n\n\n\nName\nDescription\n\n\n\n\nadd_pose_position_ids\nuse the PoSE technique to extend the context length by randomly skipping\n\n\nadd_position_ids\nHandle both single-example and batched data.\n\n\ndrop_long_seq\nDrop samples whose sequence length is either too long (> sequence_len)\n\n\nsetup_trainer\nHelper method for instantiating and building a (causal or RLHF) trainer.\n\n\n\n\n\nutils.trainer.add_pose_position_ids(\n sample,\n max_context_len=32768,\n 
split_on_token_ids=None,\n chunks=2,\n)\nuse the PoSE technique to extend the context length by randomly skipping\npositions in the context. We only want to skip right before tokens in\nthe split_on_token_ids list. We should attempt to randomly distribute\nthe skips, but we don’t need the final position_ids to be the full\ncontext_len. There may be multiple turns in the context, so we want to\nmake sure we take into account the maximum possible number of skips\nremaining in each sample.\n\n\n\nutils.trainer.add_position_ids(sample)\nHandle both single-example and batched data.\n- single example: sample[‘input_ids’] is a list[int]\n- batched data: sample[‘input_ids’] is a list[list[int]]\n\n\n\nutils.trainer.drop_long_seq(sample, sequence_len=2048, min_sequence_len=2)\nDrop samples whose sequence length is either too long (> sequence_len)\nor too short (< min_sequence_len).\nWorks for both single-example (list[int]) or batched (list[list[int]]).\n\n\n\nutils.trainer.setup_trainer(\n cfg,\n train_dataset,\n eval_dataset,\n model,\n tokenizer,\n processor,\n total_num_steps,\n model_ref=None,\n peft_config=None,\n)\nHelper method for instantiating and building a (causal or RLHF) trainer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\n\nAxolotl config object containing training parameters.\nrequired\n\n\ntrain_dataset\n\nDataset to use for training.\nrequired\n\n\neval_dataset\n\nDataset to use for evaluation.\nrequired\n\n\nmodel\n\nThe model to train.\nrequired\n\n\ntokenizer\n\nTokenizer for processing text input.\nrequired\n\n\nprocessor\n\nProcessor for data preparation.\nrequired\n\n\ntotal_num_steps\n\nThe total number of training steps.\nrequired\n\n\nmodel_ref\n\nOptional reference model for RLHF training. Default is None.\nNone\n\n\npeft_config\n\nOptional PEFT (Parameter-Efficient Fine-Tuning) configuration. 
Default is None.\nNone\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\n\nA trainer instance (either HFRLTrainer or HFCausalTrainer) configured based on the provided parameters." }, { - "objectID": "docs/api/utils.schedulers.html#functions", - "href": "docs/api/utils.schedulers.html#functions", - "title": "utils.schedulers", + "objectID": "docs/api/utils.trainer.html#functions", + "href": "docs/api/utils.trainer.html#functions", + "title": "utils.trainer", "section": "", - "text": "Name\nDescription\n\n\n\n\nget_cosine_schedule_with_min_lr\n\n\n\nget_cosine_schedule_with_quadratic_warmup\nCreate a schedule with a learning rate that decreases following the values of the cosine function between the\n\n\nget_cosine_schedule_with_warmup_decay_constant\nImplementation of Continual Pre-Training of Large Language Models: How to (re)warm your model? (https://arxiv.org/pdf/2308.04014.pdf)\n\n\n\n\n\nutils.schedulers.get_cosine_schedule_with_min_lr(\n optimizer,\n num_warmup_steps,\n num_training_steps,\n min_lr_ratio=0.0,\n)\n\n\n\nlinear warmup from 0 -> max_lr over num_warmup_steps\ncosine learning rate annealing from max_lr -> min_lr over num_training_steps\n\n\n\n\n\nutils.schedulers.get_cosine_schedule_with_quadratic_warmup(\n optimizer,\n num_warmup_steps,\n num_training_steps,\n num_cycles=0.5,\n last_epoch=-1,\n)\nCreate a schedule with a learning rate that decreases following the values of the cosine function between the\ninitial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the\ninitial lr set in the optimizer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\noptimizer\n[~torch.optim.Optimizer]\nThe optimizer for which to schedule the learning rate.\nrequired\n\n\nnum_warmup_steps\nint\nThe number of steps for the warmup phase.\nrequired\n\n\nnum_training_steps\nint\nThe total number of training steps.\nrequired\n\n\nnum_cycles\nfloat, optional, defaults to 0.5\nThe number of 
waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine).\n0.5\n\n\nlast_epoch\nint, optional, defaults to -1\nThe index of the last epoch when resuming training.\n-1\n\n\n\n\n\n\ntorch.optim.lr_scheduler.LambdaLR with the appropriate schedule.\n\n\n\n\nutils.schedulers.get_cosine_schedule_with_warmup_decay_constant(\n optimizer,\n num_warmup_steps,\n num_training_steps,\n constant_lr_ratio,\n min_lr_ratio,\n num_cycles=0.5,\n last_epoch=-1,\n)\nImplementation of Continual Pre-Training of Large Language Models: How to (re)warm your model? (https://arxiv.org/pdf/2308.04014.pdf)\nCreate a schedule with a learning rate that decreases following the values of the cosine function between the\ninitial lr set in the optimizer to min_lr_ratio until num_training_steps * constant_lr_ratio, after constant_rate returns constant value of min_rate\n, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\noptimizer\n[~torch.optim.Optimizer]\nThe optimizer for which to schedule the learning rate.\nrequired\n\n\nnum_warmup_steps\nint\nThe number of steps for the warmup phase.\nrequired\n\n\nnum_training_steps\nint\nThe total number of training steps.\nrequired\n\n\nconstant_lr_ratio\nfloat\n(float): The ratio of num_training_steps to decrease by cosine function.\nrequired\n\n\nmin_lr_ratio\nfloat\n(float): The ratio of maximum learning rate for cosine function to decay to minimum learning rate. | _required_ | | num_cycles |float, *optional*, defaults to 0.5 | The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine). |0.5| | last_epoch |int, *optional*, defaults to -1 | The index of the last epoch when resuming training. |-1`\n\n\n\n\n\n\n\ntorch.optim.lr_scheduler.LambdaLR with the appropriate schedule." 
+ "text": "Name\nDescription\n\n\n\n\nadd_pose_position_ids\nuse the PoSE technique to extend the context length by randomly skipping\n\n\nadd_position_ids\nHandle both single-example and batched data.\n\n\ndrop_long_seq\nDrop samples whose sequence length is either too long (> sequence_len)\n\n\nsetup_trainer\nHelper method for instantiating and building a (causal or RLHF) trainer.\n\n\n\n\n\nutils.trainer.add_pose_position_ids(\n sample,\n max_context_len=32768,\n split_on_token_ids=None,\n chunks=2,\n)\nuse the PoSE technique to extend the context length by randomly skipping\npositions in the context. We only want to skip right before tokens in\nthe split_on_token_ids list. We should attempt to randomly distribute\nthe skips, but we don’t need the final position_ids to be the full\ncontext_len. There may be multiple turns in the context, so we want to\nmake sure we take into account the maximum possible number of skips\nremaining in each sample.\n\n\n\nutils.trainer.add_position_ids(sample)\nHandle both single-example and batched data.\n- single example: sample[‘input_ids’] is a list[int]\n- batched data: sample[‘input_ids’] is a list[list[int]]\n\n\n\nutils.trainer.drop_long_seq(sample, sequence_len=2048, min_sequence_len=2)\nDrop samples whose sequence length is either too long (> sequence_len)\nor too short (< min_sequence_len).\nWorks for both single-example (list[int]) or batched (list[list[int]]).\n\n\n\nutils.trainer.setup_trainer(\n cfg,\n train_dataset,\n eval_dataset,\n model,\n tokenizer,\n processor,\n total_num_steps,\n model_ref=None,\n peft_config=None,\n)\nHelper method for instantiating and building a (causal or RLHF) trainer.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\n\nAxolotl config object containing training parameters.\nrequired\n\n\ntrain_dataset\n\nDataset to use for training.\nrequired\n\n\neval_dataset\n\nDataset to use for evaluation.\nrequired\n\n\nmodel\n\nThe model to 
train.\nrequired\n\n\ntokenizer\n\nTokenizer for processing text input.\nrequired\n\n\nprocessor\n\nProcessor for data preparation.\nrequired\n\n\ntotal_num_steps\n\nThe total number of training steps.\nrequired\n\n\nmodel_ref\n\nOptional reference model for RLHF training. Default is None.\nNone\n\n\npeft_config\n\nOptional PEFT (Parameter-Efficient Fine-Tuning) configuration. Default is None.\nNone\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\n\nA trainer instance (either HFRLTrainer or HFCausalTrainer) configured based on the provided parameters." }, { - "objectID": "docs/api/core.trainers.mixins.scheduler.html", - "href": "docs/api/core.trainers.mixins.scheduler.html", - "title": "core.trainers.mixins.scheduler", + "objectID": "docs/api/cli.merge_lora.html", + "href": "docs/api/cli.merge_lora.html", + "title": "cli.merge_lora", "section": "", - "text": "core.trainers.mixins.scheduler\nModule for Axolotl trainer scheduler mixin\n\n\n\n\n\nName\nDescription\n\n\n\n\nSchedulerMixin\nMixin class for scheduler setup in CausalTrainer.\n\n\n\n\n\ncore.trainers.mixins.scheduler.SchedulerMixin()\nMixin class for scheduler setup in CausalTrainer.\n\n\n\n\n\nName\nDescription\n\n\n\n\ncreate_scheduler\nSet up the scheduler. The optimizer of the trainer must have been set up either before this method is called or\n\n\n\n\n\ncore.trainers.mixins.scheduler.SchedulerMixin.create_scheduler(\n num_training_steps,\n optimizer=None,\n)\nSet up the scheduler. The optimizer of the trainer must have been set up either before this method is called or\npassed as an argument.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nnum_training_steps\nint\nThe number of training steps to do.\nrequired\n\n\noptimizer\ntorch.optim.Optimizer\nThe training optimizer\nNone" + "text": "cli.merge_lora\nCLI to merge a trained LoRA into a base model.\n\n\n\n\n\nName\nDescription\n\n\n\n\ndo_cli\nParses axolotl config, CLI args, and calls do_merge_lora. 
Note that various\n\n\ndo_merge_lora\nCalls transformers’ merge_and_unload on the model given in the axolotl config\n\n\n\n\n\ncli.merge_lora.do_cli(config=Path('examples/'), **kwargs)\nParses axolotl config, CLI args, and calls do_merge_lora. Note that various\nconfig values will be overwritten to allow the LoRA merge logic to work as expected\n(load_in_8bit=False, load_in4bit=False, flash_attention=False, etc.).\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nconfig\nUnion[Path, str]\nPath to axolotl config YAML file.\nPath('examples/')\n\n\nkwargs\n\nAdditional keyword arguments to override config file values.\n{}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nValueError\nIf target directory for LoRA merged model does not exist.\n\n\n\n\n\n\n\ncli.merge_lora.do_merge_lora(cfg)\nCalls transformers’ merge_and_unload on the model given in the axolotl config\nalong with the LoRA adapters to combine them into a single base model.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired" }, { - "objectID": "docs/api/core.trainers.mixins.scheduler.html#classes", - "href": "docs/api/core.trainers.mixins.scheduler.html#classes", - "title": "core.trainers.mixins.scheduler", + "objectID": "docs/api/cli.merge_lora.html#functions", + "href": "docs/api/cli.merge_lora.html#functions", + "title": "cli.merge_lora", "section": "", - "text": "Name\nDescription\n\n\n\n\nSchedulerMixin\nMixin class for scheduler setup in CausalTrainer.\n\n\n\n\n\ncore.trainers.mixins.scheduler.SchedulerMixin()\nMixin class for scheduler setup in CausalTrainer.\n\n\n\n\n\nName\nDescription\n\n\n\n\ncreate_scheduler\nSet up the scheduler. The optimizer of the trainer must have been set up either before this method is called or\n\n\n\n\n\ncore.trainers.mixins.scheduler.SchedulerMixin.create_scheduler(\n num_training_steps,\n optimizer=None,\n)\nSet up the scheduler. 
The optimizer of the trainer must have been set up either before this method is called or\npassed as an argument.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nnum_training_steps\nint\nThe number of training steps to do.\nrequired\n\n\noptimizer\ntorch.optim.Optimizer\nThe training optimizer\nNone" + "text": "Name\nDescription\n\n\n\n\ndo_cli\nParses axolotl config, CLI args, and calls do_merge_lora. Note that various\n\n\ndo_merge_lora\nCalls transformers’ merge_and_unload on the model given in the axolotl config\n\n\n\n\n\ncli.merge_lora.do_cli(config=Path('examples/'), **kwargs)\nParses axolotl config, CLI args, and calls do_merge_lora. Note that various\nconfig values will be overwritten to allow the LoRA merge logic to work as expected\n(load_in_8bit=False, load_in4bit=False, flash_attention=False, etc.).\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nconfig\nUnion[Path, str]\nPath to axolotl config YAML file.\nPath('examples/')\n\n\nkwargs\n\nAdditional keyword arguments to override config file values.\n{}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nValueError\nIf target directory for LoRA merged model does not exist.\n\n\n\n\n\n\n\ncli.merge_lora.do_merge_lora(cfg)\nCalls transformers’ merge_and_unload on the model given in the axolotl config\nalong with the LoRA adapters to combine them into a single base model.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired" }, { - "objectID": "docs/api/core.datasets.chat.html", - "href": "docs/api/core.datasets.chat.html", - "title": "core.datasets.chat", + "objectID": "docs/api/monkeypatch.lora_kernels.html", + "href": "docs/api/monkeypatch.lora_kernels.html", + "title": "monkeypatch.lora_kernels", "section": "", - "text": "core.datasets.chat\nchat dataset module\n\n\n\n\n\nName\nDescription\n\n\n\n\nTokenizedChatDataset\nTokenized chat 
dataset\n\n\n\n\n\ncore.datasets.chat.TokenizedChatDataset(\n self,\n data,\n model_transform,\n *args,\n message_transform=None,\n formatter=None,\n process_count=None,\n keep_in_memory=False,\n **kwargs,\n)\nTokenized chat dataset" + "text": "monkeypatch.lora_kernels\nModule for patching custom LoRA Triton kernels and torch.autograd functions.\n\n\n\n\n\nName\nDescription\n\n\n\n\nFakeMLP\nplaceholder MLP for triton patching\n\n\n\n\n\nmonkeypatch.lora_kernels.FakeMLP(gate_proj, up_proj, down_proj)\nplaceholder MLP for triton patching\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\napply_lora_kernel_patches\nApplies optimized Triton kernel patches to a PEFT model.\n\n\nget_attention_cls_from_config\nGet the appropriate attention class by inspecting the model config.\n\n\noriginal_apply_o\nOriginal implementation of output projection without optimizations.\n\n\noriginal_apply_qkv\nOriginal implementation of QKV projection without optimizations.\n\n\npatch_self_attn_lora\nGiven an axolotl config, this method patches the inferred attention class forward\n\n\n\n\n\nmonkeypatch.lora_kernels.apply_lora_kernel_patches(model, cfg)\nApplies optimized Triton kernel patches to a PEFT model.\nPatches a PEFT model with optimized implementations for MLP and attention\ncomputations. 
The optimizations include custom Triton kernels for activation\nfunctions and specialized autograd functions for LoRA computations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nmodel\nPeftModelForCausalLM\nA PEFT model to be patched with optimized kernels.\nrequired\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\nPeftModelForCausalLM\nPeftModelForCausalLM\nThe patched model with optimized kernels.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTypeError\nIf the provided model is not a PeftModelForCausalLM.\n\n\n\nNotImplementedError\nIf the model type is not supported.\n\n\n\nAssertionError\nIf multiple adapters are active (currently unsupported).\n\n\n\n\n\n\nThe optimizations require LoRA adapters with no dropout and no bias terms. The\nfunction will skip patching if these conditions aren’t met.\n\n\n\n\nmonkeypatch.lora_kernels.get_attention_cls_from_config(cfg)\nGet the appropriate attention class by inspecting the model config.\nUses dynamic import to support any model architecture that follows\nthe standard transformers naming convention.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nType[nn.Module]\nThe appropriate attention class for the model.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nValueError\nIf base_model not specified or attention class cannot be imported\n\n\n\nImportError\nIf the model module or attention class doesn’t exist\n\n\n\n\n\n\n\nmonkeypatch.lora_kernels.original_apply_o(self, hidden_states)\nOriginal implementation of output projection without optimizations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nself\nnn.Module\nThe attention module instance.\nrequired\n\n\nhidden_states\ntorch.Tensor\nInput tensor 
of shape [batch_size, seq_len, hidden_dim]`.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntorch.Tensor\nThe output projection result.\n\n\n\n\n\n\n\nmonkeypatch.lora_kernels.original_apply_qkv(self, hidden_states)\nOriginal implementation of QKV projection without optimizations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nself\nnn.Module\nThe attention module instance.\nrequired\n\n\nhidden_states\ntorch.Tensor\nInput tensor of shape [batch_size, seq_len, hidden_dim].\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[torch.Tensor, torch.Tensor, torch.Tensor]\nA tuple (query_states, key_states, value_states) containing the projected states for query, key, and value.\n\n\n\n\n\n\n\nmonkeypatch.lora_kernels.patch_self_attn_lora(cfg)\nGiven an axolotl config, this method patches the inferred attention class forward\npass with optimized LoRA implementations.\nIt modifies the attention class to use optimized QKV and output projections. The\noriginal implementation is preserved and can be restored if needed.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nAssertionError\nIf the required code blocks are not found in the attention implementation." 
}, { - "objectID": "docs/api/core.datasets.chat.html#classes", - "href": "docs/api/core.datasets.chat.html#classes", - "title": "core.datasets.chat", + "objectID": "docs/api/monkeypatch.lora_kernels.html#classes", + "href": "docs/api/monkeypatch.lora_kernels.html#classes", + "title": "monkeypatch.lora_kernels", "section": "", - "text": "Name\nDescription\n\n\n\n\nTokenizedChatDataset\nTokenized chat dataset\n\n\n\n\n\ncore.datasets.chat.TokenizedChatDataset(\n self,\n data,\n model_transform,\n *args,\n message_transform=None,\n formatter=None,\n process_count=None,\n keep_in_memory=False,\n **kwargs,\n)\nTokenized chat dataset" + "text": "Name\nDescription\n\n\n\n\nFakeMLP\nplaceholder MLP for triton patching\n\n\n\n\n\nmonkeypatch.lora_kernels.FakeMLP(gate_proj, up_proj, down_proj)\nplaceholder MLP for triton patching" }, { - "objectID": "docs/api/utils.callbacks.mlflow_.html", - "href": "docs/api/utils.callbacks.mlflow_.html", - "title": "utils.callbacks.mlflow_", + "objectID": "docs/api/monkeypatch.lora_kernels.html#functions", + "href": "docs/api/monkeypatch.lora_kernels.html#functions", + "title": "monkeypatch.lora_kernels", "section": "", - "text": "utils.callbacks.mlflow_\nMLFlow module for trainer callbacks\n\n\n\n\n\nName\nDescription\n\n\n\n\nSaveAxolotlConfigtoMlflowCallback\nCallback to save axolotl config to mlflow\n\n\n\n\n\nutils.callbacks.mlflow_.SaveAxolotlConfigtoMlflowCallback(\n self,\n axolotl_config_path,\n)\nCallback to save axolotl config to mlflow" + "text": "Name\nDescription\n\n\n\n\napply_lora_kernel_patches\nApplies optimized Triton kernel patches to a PEFT model.\n\n\nget_attention_cls_from_config\nGet the appropriate attention class by inspecting the model config.\n\n\noriginal_apply_o\nOriginal implementation of output projection without optimizations.\n\n\noriginal_apply_qkv\nOriginal implementation of QKV projection without optimizations.\n\n\npatch_self_attn_lora\nGiven an axolotl config, this method patches the inferred 
attention class forward\n\n\n\n\n\nmonkeypatch.lora_kernels.apply_lora_kernel_patches(model, cfg)\nApplies optimized Triton kernel patches to a PEFT model.\nPatches a PEFT model with optimized implementations for MLP and attention\ncomputations. The optimizations include custom Triton kernels for activation\nfunctions and specialized autograd functions for LoRA computations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nmodel\nPeftModelForCausalLM\nA PEFT model to be patched with optimized kernels.\nrequired\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\nPeftModelForCausalLM\nPeftModelForCausalLM\nThe patched model with optimized kernels.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTypeError\nIf the provided model is not a PeftModelForCausalLM.\n\n\n\nNotImplementedError\nIf the model type is not supported.\n\n\n\nAssertionError\nIf multiple adapters are active (currently unsupported).\n\n\n\n\n\n\nThe optimizations require LoRA adapters with no dropout and no bias terms. 
The\nfunction will skip patching if these conditions aren’t met.\n\n\n\n\nmonkeypatch.lora_kernels.get_attention_cls_from_config(cfg)\nGet the appropriate attention class by inspecting the model config.\nUses dynamic import to support any model architecture that follows\nthe standard transformers naming convention.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nType[nn.Module]\nThe appropriate attention class for the model.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nValueError\nIf base_model not specified or attention class cannot be imported\n\n\n\nImportError\nIf the model module or attention class doesn’t exist\n\n\n\n\n\n\n\nmonkeypatch.lora_kernels.original_apply_o(self, hidden_states)\nOriginal implementation of output projection without optimizations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nself\nnn.Module\nThe attention module instance.\nrequired\n\n\nhidden_states\ntorch.Tensor\nInput tensor of shape [batch_size, seq_len, hidden_dim]`.\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntorch.Tensor\nThe output projection result.\n\n\n\n\n\n\n\nmonkeypatch.lora_kernels.original_apply_qkv(self, hidden_states)\nOriginal implementation of QKV projection without optimizations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nself\nnn.Module\nThe attention module instance.\nrequired\n\n\nhidden_states\ntorch.Tensor\nInput tensor of shape [batch_size, seq_len, hidden_dim].\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[torch.Tensor, torch.Tensor, torch.Tensor]\nA tuple (query_states, key_states, value_states) containing the projected states for query, key, and value.\n\n\n\n\n\n\n\nmonkeypatch.lora_kernels.patch_self_attn_lora(cfg)\nGiven an axolotl config, this method patches the inferred attention 
class forward\npass with optimized LoRA implementations.\nIt modifies the attention class to use optimized QKV and output projections. The\noriginal implementation is preserved and can be restored if needed.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nAssertionError\nIf the required code blocks are not found in the attention implementation." }, { - "objectID": "docs/api/utils.callbacks.mlflow_.html#classes", - "href": "docs/api/utils.callbacks.mlflow_.html#classes", - "title": "utils.callbacks.mlflow_", + "objectID": "docs/api/core.chat.format.shared.html", + "href": "docs/api/core.chat.format.shared.html", + "title": "core.chat.format.shared", "section": "", - "text": "Name\nDescription\n\n\n\n\nSaveAxolotlConfigtoMlflowCallback\nCallback to save axolotl config to mlflow\n\n\n\n\n\nutils.callbacks.mlflow_.SaveAxolotlConfigtoMlflowCallback(\n self,\n axolotl_config_path,\n)\nCallback to save axolotl config to mlflow" + "text": "core.chat.format.shared\ncore.chat.format.shared\nshared functions for format transforms" }, { - "objectID": "docs/api/monkeypatch.mistral_attn_hijack_flash.html", - "href": "docs/api/monkeypatch.mistral_attn_hijack_flash.html", - "title": "monkeypatch.mistral_attn_hijack_flash", + "objectID": "docs/api/utils.callbacks.comet_.html", + "href": "docs/api/utils.callbacks.comet_.html", + "title": "utils.callbacks.comet_", "section": "", - "text": "monkeypatch.mistral_attn_hijack_flash\nFlash attention monkey patch for mistral model\n\n\n\n\n\nName\nDescription\n\n\n\n\nMistralDecoderLayer\npatched version of MistralDecoderLayer to pass through the precalculated cu_seqlens\n\n\n\n\n\nmonkeypatch.mistral_attn_hijack_flash.MistralDecoderLayer()\npatched version of MistralDecoderLayer to pass through the precalculated 
cu_seqlens\n\n\n\n\n\nName\nDescription\n\n\n\n\nforward\n\n\n\n\n\n\nmonkeypatch.mistral_attn_hijack_flash.MistralDecoderLayer.forward(\n hidden_states,\n attention_mask=None,\n position_ids=None,\n past_key_value=None,\n output_attentions=False,\n use_cache=False,\n cu_seqlens=None,\n max_seqlen=None,\n)\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nhidden_states\ntorch.FloatTensor\ninput to the layer of shape (batch, seq_len, embed_dim)\nrequired\n\n\nattention_mask\ntorch.FloatTensor, optional\nattention mask of size (batch, 1, tgt_len, src_len) where padding elements are indicated by very large negative values.\nNone\n\n\noutput_attentions\nbool, optional\nWhether or not to return the attentions tensors of all attention layers. See attentions under returned tensors for more detail.\nFalse\n\n\nuse_cache\nbool, optional\nIf set to True, past_key_values key value states are returned and can be used to speed up decoding (see past_key_values).\nFalse\n\n\npast_key_value\nTuple(torch.FloatTensor), optional\ncached past key and value projection states\nNone\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\ngenerate_qkv\n\n\n\n\n\n\nmonkeypatch.mistral_attn_hijack_flash.generate_qkv(\n q,\n k,\n v,\n query_padding_mask=None,\n key_padding_mask=None,\n kvpacked=False,\n qkvpacked=False,\n)\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nq\n\n(batch_size, seqlen_q, nheads, d)\nrequired\n\n\nk\n\n(batch_size, seqlen_k, nheads_k, d)\nrequired\n\n\nv\n\n(batch_size, seqlen_k, nheads_k, d)\nrequired\n\n\nquery_padding_mask\n\n(batch_size, seqlen), bool\nNone\n\n\nkey_padding_mask\n\n(batch_size, seqlen), bool\nNone" + "text": "utils.callbacks.comet_\nComet module for trainer callbacks\n\n\n\n\n\nName\nDescription\n\n\n\n\nSaveAxolotlConfigtoCometCallback\nCallback to save axolotl config to comet\n\n\n\n\n\nutils.callbacks.comet_.SaveAxolotlConfigtoCometCallback(axolotl_config_path)\nCallback to save axolotl config to comet" }, { - 
"objectID": "docs/api/monkeypatch.mistral_attn_hijack_flash.html#classes", - "href": "docs/api/monkeypatch.mistral_attn_hijack_flash.html#classes", - "title": "monkeypatch.mistral_attn_hijack_flash", + "objectID": "docs/api/utils.callbacks.comet_.html#classes", + "href": "docs/api/utils.callbacks.comet_.html#classes", + "title": "utils.callbacks.comet_", "section": "", - "text": "Name\nDescription\n\n\n\n\nMistralDecoderLayer\npatched version of MistralDecoderLayer to pass through the precalculated cu_seqlens\n\n\n\n\n\nmonkeypatch.mistral_attn_hijack_flash.MistralDecoderLayer()\npatched version of MistralDecoderLayer to pass through the precalculated cu_seqlens\n\n\n\n\n\nName\nDescription\n\n\n\n\nforward\n\n\n\n\n\n\nmonkeypatch.mistral_attn_hijack_flash.MistralDecoderLayer.forward(\n hidden_states,\n attention_mask=None,\n position_ids=None,\n past_key_value=None,\n output_attentions=False,\n use_cache=False,\n cu_seqlens=None,\n max_seqlen=None,\n)\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nhidden_states\ntorch.FloatTensor\ninput to the layer of shape (batch, seq_len, embed_dim)\nrequired\n\n\nattention_mask\ntorch.FloatTensor, optional\nattention mask of size (batch, 1, tgt_len, src_len) where padding elements are indicated by very large negative values.\nNone\n\n\noutput_attentions\nbool, optional\nWhether or not to return the attentions tensors of all attention layers. 
See attentions under returned tensors for more detail.\nFalse\n\n\nuse_cache\nbool, optional\nIf set to True, past_key_values key value states are returned and can be used to speed up decoding (see past_key_values).\nFalse\n\n\npast_key_value\nTuple(torch.FloatTensor), optional\ncached past key and value projection states\nNone" + "text": "Name\nDescription\n\n\n\n\nSaveAxolotlConfigtoCometCallback\nCallback to save axolotl config to comet\n\n\n\n\n\nutils.callbacks.comet_.SaveAxolotlConfigtoCometCallback(axolotl_config_path)\nCallback to save axolotl config to comet" }, { - "objectID": "docs/api/monkeypatch.mistral_attn_hijack_flash.html#functions", - "href": "docs/api/monkeypatch.mistral_attn_hijack_flash.html#functions", - "title": "monkeypatch.mistral_attn_hijack_flash", + "objectID": "docs/api/core.training_args.html", + "href": "docs/api/core.training_args.html", + "title": "core.training_args", "section": "", - "text": "Name\nDescription\n\n\n\n\ngenerate_qkv\n\n\n\n\n\n\nmonkeypatch.mistral_attn_hijack_flash.generate_qkv(\n q,\n k,\n v,\n query_padding_mask=None,\n key_padding_mask=None,\n kvpacked=False,\n qkvpacked=False,\n)\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nq\n\n(batch_size, seqlen_q, nheads, d)\nrequired\n\n\nk\n\n(batch_size, seqlen_k, nheads_k, d)\nrequired\n\n\nv\n\n(batch_size, seqlen_k, nheads_k, d)\nrequired\n\n\nquery_padding_mask\n\n(batch_size, seqlen), bool\nNone\n\n\nkey_padding_mask\n\n(batch_size, seqlen), bool\nNone" + "text": "core.training_args\nextra axolotl specific training args\n\n\n\n\n\nName\nDescription\n\n\n\n\nAxolotlCPOConfig\nCPO config for CPO training\n\n\nAxolotlKTOConfig\nKTO config for KTO training\n\n\nAxolotlORPOConfig\nORPO config for ORPO training\n\n\nAxolotlPRMConfig\nPRM config for PRM training\n\n\nAxolotlRewardConfig\nReward config for Reward training\n\n\nAxolotlTrainingArguments\nTraining arguments for Causal trainer\n\n\nAxolotlTrainingMixins\nMixin class for the Axolotl training 
args.\n\n\n\n\n\ncore.training_args.AxolotlCPOConfig(\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n simpo_gamma=None,\n)\nCPO config for CPO training\n\n\n\ncore.training_args.AxolotlKTOConfig(\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n 
dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nKTO config for KTO training\n\n\n\ncore.training_args.AxolotlORPOConfig(\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nORPO config for ORPO 
training\n\n\n\ncore.training_args.AxolotlPRMConfig(\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nPRM config for PRM training\n\n\n\ncore.training_args.AxolotlRewardConfig(\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n 
cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nReward config for Reward training\n\n\n\ncore.training_args.AxolotlTrainingArguments(\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nTraining arguments for Causal trainer\nThis code is duplicated due to HF 
TrainingArguments not setting output_dir with a\ndefault value so it can’t be used as a mixin.\n\n\n\ncore.training_args.AxolotlTrainingMixins(\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nMixin class for the Axolotl training args." 
}, { - "objectID": "docs/api/core.chat.format.llama3x.html", - "href": "docs/api/core.chat.format.llama3x.html", - "title": "core.chat.format.llama3x", + "objectID": "docs/api/core.training_args.html#classes", + "href": "docs/api/core.training_args.html#classes", + "title": "core.training_args", "section": "", - "text": "core.chat.format.llama3x\ncore.chat.format.llama3x\nLlama 3.x chat formatting functions for MessageContents" + "text": "Name\nDescription\n\n\n\n\nAxolotlCPOConfig\nCPO config for CPO training\n\n\nAxolotlKTOConfig\nKTO config for KTO training\n\n\nAxolotlORPOConfig\nORPO config for ORPO training\n\n\nAxolotlPRMConfig\nPRM config for PRM training\n\n\nAxolotlRewardConfig\nReward config for Reward training\n\n\nAxolotlTrainingArguments\nTraining arguments for Causal trainer\n\n\nAxolotlTrainingMixins\nMixin class for the Axolotl training args.\n\n\n\n\n\ncore.training_args.AxolotlCPOConfig(\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n 
kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n simpo_gamma=None,\n)\nCPO config for CPO training\n\n\n\ncore.training_args.AxolotlKTOConfig(\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nKTO config for KTO training\n\n\n\ncore.training_args.AxolotlORPOConfig(\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n 
bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nORPO config for ORPO training\n\n\n\ncore.training_args.AxolotlPRMConfig(\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n 
kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nPRM config for PRM training\n\n\n\ncore.training_args.AxolotlRewardConfig(\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nReward config for Reward training\n\n\n\ncore.training_args.AxolotlTrainingArguments(\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n 
bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nTraining arguments for Causal trainer\nThis code is duplicated due to HF TrainingArguments not setting output_dir with a\ndefault value so it can’t be used as a mixin.\n\n\n\ncore.training_args.AxolotlTrainingMixins(\n model_type=None,\n lr_quadratic_warmup=False,\n pretraining=False,\n sample_packing=False,\n sample_packing_sequentially=False,\n multipack_real_batches=False,\n eval_sample_packing=None,\n sample_packing_efficiency=1.0,\n sample_packing_bin_size=200,\n sample_packing_group_size=100000,\n max_seq_length=2048,\n relora_steps=None,\n relora_warmup_steps=None,\n relora_anneal_steps=None,\n relora_prune_ratio=0.9,\n bench_split='eval',\n bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',\n do_bench_eval=False,\n do_causal_lm_eval=False,\n max_bench_samples=None,\n bench_source_max_len=2048,\n dataloader_prefetch_factor=None,\n cosine_min_lr_ratio=None,\n cosine_constant_lr_ratio=None,\n loraplus_lr_ratio=None,\n loraplus_lr_embedding=1e-06,\n embedding_lr_scale=None,\n lr_groups=None,\n embedding_lr=None,\n qlora=False,\n orpo_alpha=None,\n lisa_n_layers=None,\n lisa_step_interval=None,\n lisa_layers_attribute=None,\n curriculum_sampling=None,\n 
alternate_lr_scheduler_type=None,\n chat_template=None,\n kd_ce_alpha=None,\n kd_alpha=1.0,\n kd_temperature=1.0,\n kd_zscore_base_temp=None,\n kd_top_k_before_softmax=None,\n adam_beta3=None,\n adam_epsilon2=None,\n image_size=None,\n image_resize_algorithm=None,\n)\nMixin class for the Axolotl training args." }, { - "objectID": "docs/api/cli.checks.html", - "href": "docs/api/cli.checks.html", - "title": "cli.checks", + "objectID": "docs/api/cli.cloud.modal_.html", + "href": "docs/api/cli.cloud.modal_.html", + "title": "cli.cloud.modal_", "section": "", - "text": "cli.checks\nVarious checks for Axolotl CLI.\n\n\n\n\n\nName\nDescription\n\n\n\n\ncheck_accelerate_default_config\nLogs at warning level if no accelerate config file is found.\n\n\ncheck_user_token\nChecks for HF user info. Check is skipped if HF_HUB_OFFLINE=1.\n\n\n\n\n\ncli.checks.check_accelerate_default_config()\nLogs at warning level if no accelerate config file is found.\n\n\n\ncli.checks.check_user_token()\nChecks for HF user info. Check is skipped if HF_HUB_OFFLINE=1.\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nbool\nBoolean indicating successful check (i.e., HF_HUB_OFFLINE=1 or HF user info is retrieved).\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nLocalTokenNotFoundError\nIf HF user info can’t be retrieved." + "text": "cli.cloud.modal_\nModal Cloud support from CLI\n\n\n\n\n\nName\nDescription\n\n\n\n\nModalCloud\nModal Cloud implementation.\n\n\n\n\n\ncli.cloud.modal_.ModalCloud(config, app=None)\nModal Cloud implementation.\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nrun_cmd\nRun a command inside a folder, with Modal Volume reloading before and commit on success.\n\n\n\n\n\ncli.cloud.modal_.run_cmd(cmd, run_folder, volumes=None)\nRun a command inside a folder, with Modal Volume reloading before and commit on success." 
}, { - "objectID": "docs/api/cli.checks.html#functions", - "href": "docs/api/cli.checks.html#functions", - "title": "cli.checks", + "objectID": "docs/api/cli.cloud.modal_.html#classes", + "href": "docs/api/cli.cloud.modal_.html#classes", + "title": "cli.cloud.modal_", "section": "", - "text": "Name\nDescription\n\n\n\n\ncheck_accelerate_default_config\nLogs at warning level if no accelerate config file is found.\n\n\ncheck_user_token\nChecks for HF user info. Check is skipped if HF_HUB_OFFLINE=1.\n\n\n\n\n\ncli.checks.check_accelerate_default_config()\nLogs at warning level if no accelerate config file is found.\n\n\n\ncli.checks.check_user_token()\nChecks for HF user info. Check is skipped if HF_HUB_OFFLINE=1.\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nbool\nBoolean indicating successful check (i.e., HF_HUB_OFFLINE=1 or HF user info is retrieved).\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nLocalTokenNotFoundError\nIf HF user info can’t be retrieved." + "text": "Name\nDescription\n\n\n\n\nModalCloud\nModal Cloud implementation.\n\n\n\n\n\ncli.cloud.modal_.ModalCloud(config, app=None)\nModal Cloud implementation." 
}, { - "objectID": "docs/api/monkeypatch.transformers_fa_utils.html", - "href": "docs/api/monkeypatch.transformers_fa_utils.html", - "title": "monkeypatch.transformers_fa_utils", + "objectID": "docs/api/cli.cloud.modal_.html#functions", + "href": "docs/api/cli.cloud.modal_.html#functions", + "title": "cli.cloud.modal_", "section": "", - "text": "monkeypatch.transformers_fa_utils\nsee https://github.com/huggingface/transformers/pull/35834\n\n\n\n\n\nName\nDescription\n\n\n\n\nfixed_fa_peft_integration_check\nPEFT usually casts the layer norms in float32 for training stability reasons\n\n\n\n\n\nmonkeypatch.transformers_fa_utils.fixed_fa_peft_integration_check(\n query,\n key,\n value,\n target_dtype=None,\n preferred_dtype=None,\n)\nPEFT usually casts the layer norms in float32 for training stability reasons\ntherefore the input hidden states gets silently casted in float32. Hence, we need\ncast them back in float16 / bfloat16 just to be sure everything works as expected.\nThis might slowdown training & inference so it is recommended to not cast the LayerNorms!\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nquery\ntorch.Tensor\nInput query states to be passed to Flash Attention API\nrequired\n\n\nkey\ntorch.Tensor\nInput key states to be passed to Flash Attention API\nrequired\n\n\nvalue\ntorch.Tensor\nInput value states to be passed to Flash Attention API\nrequired\n\n\ntarget_dtype\ntorch.dtype, optional\nThe dtype to convert the attention tensors to. Conversion can be ignored by not providing the target dtype.\nNone\n\n\npreferred_dtype\ntorch.dtype, optional\nThe preferred dtype to convert the attention tensors to regardless of the target dtype.\nNone" + "text": "Name\nDescription\n\n\n\n\nrun_cmd\nRun a command inside a folder, with Modal Volume reloading before and commit on success.\n\n\n\n\n\ncli.cloud.modal_.run_cmd(cmd, run_folder, volumes=None)\nRun a command inside a folder, with Modal Volume reloading before and commit on success." 
}, { - "objectID": "docs/api/monkeypatch.transformers_fa_utils.html#functions", - "href": "docs/api/monkeypatch.transformers_fa_utils.html#functions", - "title": "monkeypatch.transformers_fa_utils", + "objectID": "docs/api/monkeypatch.relora.html", + "href": "docs/api/monkeypatch.relora.html", + "title": "monkeypatch.relora", "section": "", - "text": "Name\nDescription\n\n\n\n\nfixed_fa_peft_integration_check\nPEFT usually casts the layer norms in float32 for training stability reasons\n\n\n\n\n\nmonkeypatch.transformers_fa_utils.fixed_fa_peft_integration_check(\n query,\n key,\n value,\n target_dtype=None,\n preferred_dtype=None,\n)\nPEFT usually casts the layer norms in float32 for training stability reasons\ntherefore the input hidden states gets silently casted in float32. Hence, we need\ncast them back in float16 / bfloat16 just to be sure everything works as expected.\nThis might slowdown training & inference so it is recommended to not cast the LayerNorms!\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nquery\ntorch.Tensor\nInput query states to be passed to Flash Attention API\nrequired\n\n\nkey\ntorch.Tensor\nInput key states to be passed to Flash Attention API\nrequired\n\n\nvalue\ntorch.Tensor\nInput value states to be passed to Flash Attention API\nrequired\n\n\ntarget_dtype\ntorch.dtype, optional\nThe dtype to convert the attention tensors to. 
Conversion can be ignored by not providing the target dtype.\nNone\n\n\npreferred_dtype\ntorch.dtype, optional\nThe preferred dtype to convert the attention tensors to regardless of the target dtype.\nNone" + "text": "monkeypatch.relora\nImplements the ReLoRA training procedure from https://arxiv.org/abs/2307.05695, minus the initial full fine-tune.\n\n\n\n\n\nName\nDescription\n\n\n\n\nReLoRACallback\nCallback to merge LoRA weights into the base model and save full-weight checkpoints\n\n\nReLoRAScheduler\nWraps another scheduler to apply per-lora-restart learning rate warmups.\n\n\n\n\n\nmonkeypatch.relora.ReLoRACallback(cfg)\nCallback to merge LoRA weights into the base model and save full-weight checkpoints\n\n\n\nmonkeypatch.relora.ReLoRAScheduler(\n optimizer,\n inner_schedule,\n relora_steps,\n warmup_steps,\n anneal_steps=1,\n min_lr_scale=0.001,\n)\nWraps another scheduler to apply per-lora-restart learning rate warmups." }, { - "objectID": "docs/api/prompt_strategies.llama2_chat.html", - "href": "docs/api/prompt_strategies.llama2_chat.html", - "title": "prompt_strategies.llama2_chat", + "objectID": "docs/api/monkeypatch.relora.html#classes", + "href": "docs/api/monkeypatch.relora.html#classes", + "title": "monkeypatch.relora", "section": "", - "text": "prompt_strategies.llama2_chat\nPrompt Strategy for finetuning Llama2 chat models\nsee also https://github.com/facebookresearch/llama/blob/6c7fe276574e78057f917549435a2554000a876d/llama/generation.py#L213 for ma reference implementation.\nThis implementation is based on the Vicuna PR and the fastchat repo, see also:\nhttps://github.com/lm-sys/FastChat/blob/cdd7730686cb1bf9ae2b768ee171bdf7d1ff04f3/fastchat/conversation.py#L847\nUse dataset type: “llama2_chat” in conig.yml to use this prompt style.\nE.g. 
in the config.yml:\ndatasets:\n - path: llama_finetune_train.jsonl\n type: llama2_chat\nThe dataset itself should look like this:\n{'conversations':[{\"from\": \"human\", \"value\": \"Who are you?\"}, {\"from\": \"gpt\", \"value\": \"I am Vicuna\"},...]}\nin a jsonl file. The first message should be from the human, the second from gpt.\nFor a custom system message, the first “from” can be “system” (followed by alternating “human” and “gpt” turns).\nImportant: Don’t use “special_tokens:” in your config.yml if you are not sure what you are doing!\n\n\n\n\n\nName\nDescription\n\n\n\n\nLLama2ChatTokenizingStrategy\nTokenizing strategy for Llama2 prompts.\n\n\nLlama2ChatConversation\nA class that manages prompt templates and keeps all conversation history.\n\n\nLlama2ChatPrompter\nA prompter that generates prompts for Llama2 models.\n\n\n\n\n\nprompt_strategies.llama2_chat.LLama2ChatTokenizingStrategy(\n self,\n *args,\n **kwargs,\n)\nTokenizing strategy for Llama2 prompts.\nadapted from https://github.com/lm-sys/FastChat/blob/main/fastchat/train/train.py\n\n\n\nprompt_strategies.llama2_chat.Llama2ChatConversation(\n self,\n name='llama2',\n system=\"[INST] <<SYS>>\\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\\n\\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. 
If you don't know the answer to a question, please don't share false information.\\n<</SYS>>\\n\\n\",\n roles=('[INST]', '[/INST]'),\n messages=list(),\n offset=0,\n)\nA class that manages prompt templates and keeps all conversation history.\ncopied from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py\n\n\n\n\n\nName\nDescription\n\n\n\n\nappend_message\nAppend a new message.\n\n\nget_prompt\nGet the prompt for generation.\n\n\n\n\n\nprompt_strategies.llama2_chat.Llama2ChatConversation.append_message(\n role,\n message,\n)\nAppend a new message.\n\n\n\nprompt_strategies.llama2_chat.Llama2ChatConversation.get_prompt()\nGet the prompt for generation.\n\n\n\n\n\nprompt_strategies.llama2_chat.Llama2ChatPrompter()\nA prompter that generates prompts for Llama2 models." + "text": "Name\nDescription\n\n\n\n\nReLoRACallback\nCallback to merge LoRA weights into the base model and save full-weight checkpoints\n\n\nReLoRAScheduler\nWraps another scheduler to apply per-lora-restart learning rate warmups.\n\n\n\n\n\nmonkeypatch.relora.ReLoRACallback(cfg)\nCallback to merge LoRA weights into the base model and save full-weight checkpoints\n\n\n\nmonkeypatch.relora.ReLoRAScheduler(\n optimizer,\n inner_schedule,\n relora_steps,\n warmup_steps,\n anneal_steps=1,\n min_lr_scale=0.001,\n)\nWraps another scheduler to apply per-lora-restart learning rate warmups." 
}, { - "objectID": "docs/api/prompt_strategies.llama2_chat.html#classes", - "href": "docs/api/prompt_strategies.llama2_chat.html#classes", - "title": "prompt_strategies.llama2_chat", + "objectID": "docs/api/core.trainers.base.html", + "href": "docs/api/core.trainers.base.html", + "title": "core.trainers.base", "section": "", - "text": "Name\nDescription\n\n\n\n\nLLama2ChatTokenizingStrategy\nTokenizing strategy for Llama2 prompts.\n\n\nLlama2ChatConversation\nA class that manages prompt templates and keeps all conversation history.\n\n\nLlama2ChatPrompter\nA prompter that generates prompts for Llama2 models.\n\n\n\n\n\nprompt_strategies.llama2_chat.LLama2ChatTokenizingStrategy(\n self,\n *args,\n **kwargs,\n)\nTokenizing strategy for Llama2 prompts.\nadapted from https://github.com/lm-sys/FastChat/blob/main/fastchat/train/train.py\n\n\n\nprompt_strategies.llama2_chat.Llama2ChatConversation(\n self,\n name='llama2',\n system=\"[INST] <<SYS>>\\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\\n\\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. 
If you don't know the answer to a question, please don't share false information.\\n<</SYS>>\\n\\n\",\n roles=('[INST]', '[/INST]'),\n messages=list(),\n offset=0,\n)\nA class that manages prompt templates and keeps all conversation history.\ncopied from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py\n\n\n\n\n\nName\nDescription\n\n\n\n\nappend_message\nAppend a new message.\n\n\nget_prompt\nGet the prompt for generation.\n\n\n\n\n\nprompt_strategies.llama2_chat.Llama2ChatConversation.append_message(\n role,\n message,\n)\nAppend a new message.\n\n\n\nprompt_strategies.llama2_chat.Llama2ChatConversation.get_prompt()\nGet the prompt for generation.\n\n\n\n\n\nprompt_strategies.llama2_chat.Llama2ChatPrompter()\nA prompter that generates prompts for Llama2 models." + "text": "core.trainers.base\nModule for customized trainers\n\n\n\n\n\nName\nDescription\n\n\n\n\nAxolotlTrainer\nExtend the base Trainer for axolotl helpers\n\n\n\n\n\ncore.trainers.base.AxolotlTrainer(\n *_args,\n bench_data_collator=None,\n eval_data_collator=None,\n dataset_tags=None,\n **kwargs,\n)\nExtend the base Trainer for axolotl helpers\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_eval_dataloader\nGet dataloader for evaluation\n\n\nget_train_dataloader\nGet dataloader for training\n\n\nlog\nLog logs on the various objects watching training, including stored metrics.\n\n\npush_to_hub\nOverwrite the push_to_hub method in order to force-add the tags when pushing the\n\n\n\n\n\ncore.trainers.base.AxolotlTrainer.get_eval_dataloader(eval_dataset=None)\nGet dataloader for evaluation\n\n\n\ncore.trainers.base.AxolotlTrainer.get_train_dataloader()\nGet dataloader for training\n\n\n\ncore.trainers.base.AxolotlTrainer.log(logs, start_time=None)\nLog logs on the various objects watching training, including stored metrics.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nlogs\ndict[str, float]\nThe values to log.\nrequired\n\n\nstart_time\nfloat | None\nThe start of 
training.\nNone\n\n\n\n\n\n\n\ncore.trainers.base.AxolotlTrainer.push_to_hub(*args, **kwargs)\nOverwrite the push_to_hub method in order to force-add the tags when pushing the\nmodel on the Hub. Please refer to ~transformers.Trainer.push_to_hub for more details." }, { - "objectID": "docs/api/convert.html", - "href": "docs/api/convert.html", - "title": "convert", + "objectID": "docs/api/core.trainers.base.html#classes", + "href": "docs/api/core.trainers.base.html#classes", + "title": "core.trainers.base", "section": "", - "text": "convert\nModule containing File Reader, File Writer, Json Parser, and Jsonl Serializer classes\n\n\n\n\n\nName\nDescription\n\n\n\n\nFileReader\nReads a file and returns its contents as a string\n\n\nFileWriter\nWrites a string to a file\n\n\nJsonParser\nParses a string as JSON and returns the result\n\n\nJsonToJsonlConverter\nConverts a JSON file to JSONL\n\n\nJsonlSerializer\nSerializes a list of JSON objects into a JSONL string\n\n\nStdoutWriter\nWrites a string to stdout\n\n\n\n\n\nconvert.FileReader()\nReads a file and returns its contents as a string\n\n\n\nconvert.FileWriter(self, file_path)\nWrites a string to a file\n\n\n\nconvert.JsonParser()\nParses a string as JSON and returns the result\n\n\n\nconvert.JsonToJsonlConverter(\n self,\n file_reader,\n file_writer,\n json_parser,\n jsonl_serializer,\n)\nConverts a JSON file to JSONL\n\n\n\nconvert.JsonlSerializer()\nSerializes a list of JSON objects into a JSONL string\n\n\n\nconvert.StdoutWriter()\nWrites a string to stdout" + "text": "Name\nDescription\n\n\n\n\nAxolotlTrainer\nExtend the base Trainer for axolotl helpers\n\n\n\n\n\ncore.trainers.base.AxolotlTrainer(\n *_args,\n bench_data_collator=None,\n eval_data_collator=None,\n dataset_tags=None,\n **kwargs,\n)\nExtend the base Trainer for axolotl helpers\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_eval_dataloader\nGet dataloader for evaluation\n\n\nget_train_dataloader\nGet dataloader for training\n\n\nlog\nLog logs on the 
various objects watching training, including stored metrics.\n\n\npush_to_hub\nOverwrite the push_to_hub method in order to force-add the tags when pushing the\n\n\n\n\n\ncore.trainers.base.AxolotlTrainer.get_eval_dataloader(eval_dataset=None)\nGet dataloader for evaluation\n\n\n\ncore.trainers.base.AxolotlTrainer.get_train_dataloader()\nGet dataloader for training\n\n\n\ncore.trainers.base.AxolotlTrainer.log(logs, start_time=None)\nLog logs on the various objects watching training, including stored metrics.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nlogs\ndict[str, float]\nThe values to log.\nrequired\n\n\nstart_time\nfloat | None\nThe start of training.\nNone\n\n\n\n\n\n\n\ncore.trainers.base.AxolotlTrainer.push_to_hub(*args, **kwargs)\nOverwrite the push_to_hub method in order to force-add the tags when pushing the\nmodel on the Hub. Please refer to ~transformers.Trainer.push_to_hub for more details." }, { - "objectID": "docs/api/convert.html#classes", - "href": "docs/api/convert.html#classes", - "title": "convert", + "objectID": "docs/api/kernels.swiglu.html", + "href": "docs/api/kernels.swiglu.html", + "title": "kernels.swiglu", "section": "", - "text": "Name\nDescription\n\n\n\n\nFileReader\nReads a file and returns its contents as a string\n\n\nFileWriter\nWrites a string to a file\n\n\nJsonParser\nParses a string as JSON and returns the result\n\n\nJsonToJsonlConverter\nConverts a JSON file to JSONL\n\n\nJsonlSerializer\nSerializes a list of JSON objects into a JSONL string\n\n\nStdoutWriter\nWrites a string to stdout\n\n\n\n\n\nconvert.FileReader()\nReads a file and returns its contents as a string\n\n\n\nconvert.FileWriter(self, file_path)\nWrites a string to a file\n\n\n\nconvert.JsonParser()\nParses a string as JSON and returns the result\n\n\n\nconvert.JsonToJsonlConverter(\n self,\n file_reader,\n file_writer,\n json_parser,\n jsonl_serializer,\n)\nConverts a JSON file to JSONL\n\n\n\nconvert.JsonlSerializer()\nSerializes a list 
of JSON objects into a JSONL string\n\n\n\nconvert.StdoutWriter()\nWrites a string to stdout" + "text": "kernels.swiglu\nModule for definition of SwiGLU Triton kernels.\nSee “GLU Variants Improve Transformer” (https://arxiv.org/abs/2002.05202).\nCredit to unsloth (https://unsloth.ai/) for inspiration for this implementation.\n\n\n\n\n\nName\nDescription\n\n\n\n\nswiglu_backward\nSwiGLU backward pass using in-place operations.\n\n\nswiglu_forward\nSwiGLU forward pass. Computes SwiGLU activation: x * sigmoid(x) * up, where\n\n\n\n\n\nkernels.swiglu.swiglu_backward(grad_output, gate, up)\nSwiGLU backward pass using in-place operations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ngrad_output\ntorch.Tensor\nGradient of loss with respect to output, shape [batch, seq_len, hidden_dim].\nrequired\n\n\ngate\ntorch.Tensor\nGate tensor from forward pass, shape [batch, seq_len, hidden_dim].\nrequired\n\n\nup\ntorch.Tensor\nUp-projection tensor from forward pass, shape [batch, seq_len, hidden_dim].\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[torch.Tensor, torch.Tensor, torch.Tensor]\nTuple containing: - Forward pass output (h) - Gradient with respect to gate (df) - Gradient with respect to up-projection (de)\n\n\n\n\n\n\n\nkernels.swiglu.swiglu_forward(gate, up)\nSwiGLU forward pass. Computes SwiGLU activation: x * sigmoid(x) * up, where\nx is the gate tensor.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ngate\ntorch.Tensor\nInput gate tensor of shape [batch, seq_len, hidden_dim].\nrequired\n\n\nup\ntorch.Tensor\nUp-projection tensor of shape [batch, seq_len, hidden_dim].\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntorch.Tensor\nOutput tensor of shape [batch, seq_len, hidden_dim]." 
}, { - "objectID": "docs/api/utils.schemas.datasets.html", - "href": "docs/api/utils.schemas.datasets.html", - "title": "utils.schemas.datasets", + "objectID": "docs/api/kernels.swiglu.html#functions", + "href": "docs/api/kernels.swiglu.html#functions", + "title": "kernels.swiglu", "section": "", - "text": "utils.schemas.datasets\nPydantic models for datasets-related configuration\n\n\n\n\n\nName\nDescription\n\n\n\n\nDPODataset\nDPO configuration subset\n\n\nKTODataset\nKTO configuration subset\n\n\nPretrainingDataset\nPretraining dataset configuration subset\n\n\nSFTDataset\nSFT configuration subset\n\n\nStepwiseSupervisedDataset\nStepwise supervised dataset configuration subset\n\n\nUserDefinedDPOType\nUser defined typing for DPO\n\n\nUserDefinedKTOType\nUser defined typing for KTO\n\n\nUserDefinedPrompterType\nStructure for user defined prompt types\n\n\n\n\n\nutils.schemas.datasets.DPODataset()\nDPO configuration subset\n\n\n\nutils.schemas.datasets.KTODataset()\nKTO configuration subset\n\n\n\nutils.schemas.datasets.PretrainingDataset()\nPretraining dataset configuration subset\n\n\n\nutils.schemas.datasets.SFTDataset()\nSFT configuration subset\n\n\n\n\n\nName\nDescription\n\n\n\n\nhandle_legacy_message_fields\nHandle backwards compatibility between legacy message field mapping and new property mapping system.\n\n\n\n\n\nutils.schemas.datasets.SFTDataset.handle_legacy_message_fields(data)\nHandle backwards compatibility between legacy message field mapping and new property mapping system.\n\n\n\n\n\nutils.schemas.datasets.StepwiseSupervisedDataset()\nStepwise supervised dataset configuration subset\n\n\n\nutils.schemas.datasets.UserDefinedDPOType()\nUser defined typing for DPO\n\n\n\nutils.schemas.datasets.UserDefinedKTOType()\nUser defined typing for KTO\n\n\n\nutils.schemas.datasets.UserDefinedPrompterType()\nStructure for user defined prompt types" + "text": "Name\nDescription\n\n\n\n\nswiglu_backward\nSwiGLU backward pass using in-place 
operations.\n\n\nswiglu_forward\nSwiGLU forward pass. Computes SwiGLU activation: x * sigmoid(x) * up, where\n\n\n\n\n\nkernels.swiglu.swiglu_backward(grad_output, gate, up)\nSwiGLU backward pass using in-place operations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ngrad_output\ntorch.Tensor\nGradient of loss with respect to output, shape [batch, seq_len, hidden_dim].\nrequired\n\n\ngate\ntorch.Tensor\nGate tensor from forward pass, shape [batch, seq_len, hidden_dim].\nrequired\n\n\nup\ntorch.Tensor\nUp-projection tensor from forward pass, shape [batch, seq_len, hidden_dim].\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[torch.Tensor, torch.Tensor, torch.Tensor]\nTuple containing: - Forward pass output (h) - Gradient with respect to gate (df) - Gradient with respect to up-projection (de)\n\n\n\n\n\n\n\nkernels.swiglu.swiglu_forward(gate, up)\nSwiGLU forward pass. Computes SwiGLU activation: x * sigmoid(x) * up, where\nx is the gate tensor.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ngate\ntorch.Tensor\nInput gate tensor of shape [batch, seq_len, hidden_dim].\nrequired\n\n\nup\ntorch.Tensor\nUp-projection tensor of shape [batch, seq_len, hidden_dim].\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntorch.Tensor\nOutput tensor of shape [batch, seq_len, hidden_dim]." 
}, { - "objectID": "docs/api/utils.schemas.datasets.html#classes", - "href": "docs/api/utils.schemas.datasets.html#classes", - "title": "utils.schemas.datasets", + "objectID": "docs/api/prompt_strategies.metharme.html", + "href": "docs/api/prompt_strategies.metharme.html", + "title": "prompt_strategies.metharme", "section": "", - "text": "Name\nDescription\n\n\n\n\nDPODataset\nDPO configuration subset\n\n\nKTODataset\nKTO configuration subset\n\n\nPretrainingDataset\nPretraining dataset configuration subset\n\n\nSFTDataset\nSFT configuration subset\n\n\nStepwiseSupervisedDataset\nStepwise supervised dataset configuration subset\n\n\nUserDefinedDPOType\nUser defined typing for DPO\n\n\nUserDefinedKTOType\nUser defined typing for KTO\n\n\nUserDefinedPrompterType\nStructure for user defined prompt types\n\n\n\n\n\nutils.schemas.datasets.DPODataset()\nDPO configuration subset\n\n\n\nutils.schemas.datasets.KTODataset()\nKTO configuration subset\n\n\n\nutils.schemas.datasets.PretrainingDataset()\nPretraining dataset configuration subset\n\n\n\nutils.schemas.datasets.SFTDataset()\nSFT configuration subset\n\n\n\n\n\nName\nDescription\n\n\n\n\nhandle_legacy_message_fields\nHandle backwards compatibility between legacy message field mapping and new property mapping system.\n\n\n\n\n\nutils.schemas.datasets.SFTDataset.handle_legacy_message_fields(data)\nHandle backwards compatibility between legacy message field mapping and new property mapping system.\n\n\n\n\n\nutils.schemas.datasets.StepwiseSupervisedDataset()\nStepwise supervised dataset configuration subset\n\n\n\nutils.schemas.datasets.UserDefinedDPOType()\nUser defined typing for DPO\n\n\n\nutils.schemas.datasets.UserDefinedKTOType()\nUser defined typing for KTO\n\n\n\nutils.schemas.datasets.UserDefinedPrompterType()\nStructure for user defined prompt types" + "text": "prompt_strategies.metharme\nModule containing the MetharmenPromptTokenizingStrategy and MetharmePrompter 
class\n\n\n\n\n\nName\nDescription\n\n\n\n\nMetharmePromptTokenizingStrategy\nTokenizing strategy for the Metharme models\n\n\nMetharmePrompter\nPrompter for the Metharme models.\n\n\n\n\n\nprompt_strategies.metharme.MetharmePromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for the Metharme models\n\n\n\nprompt_strategies.metharme.MetharmePrompter(*args, **kwargs)\nPrompter for the Metharme models." }, { - "objectID": "docs/api/prompt_strategies.dpo.llama3.html", - "href": "docs/api/prompt_strategies.dpo.llama3.html", - "title": "prompt_strategies.dpo.llama3", + "objectID": "docs/api/prompt_strategies.metharme.html#classes", + "href": "docs/api/prompt_strategies.metharme.html#classes", + "title": "prompt_strategies.metharme", "section": "", - "text": "prompt_strategies.dpo.llama3\nDPO strategies for llama-3 chat template\n\n\n\n\n\nName\nDescription\n\n\n\n\nargilla_chat\nfor argilla/dpo-mix-7k conversations\n\n\nicr\nchatml transforms for datasets with system, input, chosen, rejected\n\n\nintel\nFor Intel Orca DPO Pairs\n\n\nultra\nfor ultrafeedback binarized conversations\n\n\n\n\n\nprompt_strategies.dpo.llama3.argilla_chat(cfg, **kwargs)\nfor argilla/dpo-mix-7k conversations\n\n\n\nprompt_strategies.dpo.llama3.icr(cfg, **kwargs)\nchatml transforms for datasets with system, input, chosen, rejected\nex. 
https://huggingface.co/datasets/argilla/distilabel-intel-orca-dpo-pairs\n\n\n\nprompt_strategies.dpo.llama3.intel(cfg, **kwargs)\nFor Intel Orca DPO Pairs\n\n\n\nprompt_strategies.dpo.llama3.ultra(cfg, **kwargs)\nfor ultrafeedback binarized conversations" + "text": "Name\nDescription\n\n\n\n\nMetharmePromptTokenizingStrategy\nTokenizing strategy for the Metharme models\n\n\nMetharmePrompter\nPrompter for the Metharme models.\n\n\n\n\n\nprompt_strategies.metharme.MetharmePromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for the Metharme models\n\n\n\nprompt_strategies.metharme.MetharmePrompter(*args, **kwargs)\nPrompter for the Metharme models." }, { - "objectID": "docs/api/prompt_strategies.dpo.llama3.html#functions", - "href": "docs/api/prompt_strategies.dpo.llama3.html#functions", - "title": "prompt_strategies.dpo.llama3", + "objectID": "docs/api/loaders.constants.html", + "href": "docs/api/loaders.constants.html", + "title": "loaders.constants", "section": "", - "text": "Name\nDescription\n\n\n\n\nargilla_chat\nfor argilla/dpo-mix-7k conversations\n\n\nicr\nchatml transforms for datasets with system, input, chosen, rejected\n\n\nintel\nFor Intel Orca DPO Pairs\n\n\nultra\nfor ultrafeedback binarized conversations\n\n\n\n\n\nprompt_strategies.dpo.llama3.argilla_chat(cfg, **kwargs)\nfor argilla/dpo-mix-7k conversations\n\n\n\nprompt_strategies.dpo.llama3.icr(cfg, **kwargs)\nchatml transforms for datasets with system, input, chosen, rejected\nex. 
https://huggingface.co/datasets/argilla/distilabel-intel-orca-dpo-pairs\n\n\n\nprompt_strategies.dpo.llama3.intel(cfg, **kwargs)\nFor Intel Orca DPO Pairs\n\n\n\nprompt_strategies.dpo.llama3.ultra(cfg, **kwargs)\nfor ultrafeedback binarized conversations" + "text": "loaders.constants\nloaders.constants\nShared constants for axolotl.loaders module" }, { - "objectID": "docs/api/cli.args.html", - "href": "docs/api/cli.args.html", - "title": "cli.args", + "objectID": "docs/api/utils.schemas.enums.html", + "href": "docs/api/utils.schemas.enums.html", + "title": "utils.schemas.enums", "section": "", - "text": "cli.args\nModule for axolotl CLI command arguments.\n\n\n\n\n\nName\nDescription\n\n\n\n\nEvaluateCliArgs\nDataclass with CLI arguments for axolotl evaluate command.\n\n\nInferenceCliArgs\nDataclass with CLI arguments for axolotl inference command.\n\n\nPreprocessCliArgs\nDataclass with CLI arguments for axolotl preprocess command.\n\n\nQuantizeCliArgs\nDataclass with CLI arguments for axolotl quantize command.\n\n\nTrainerCliArgs\nDataclass with CLI arguments for axolotl train command.\n\n\nVllmServeCliArgs\nDataclass with CLI arguments for axolotl vllm-serve command.\n\n\n\n\n\ncli.args.EvaluateCliArgs(\n self,\n debug=False,\n debug_text_only=False,\n debug_num_examples=0,\n)\nDataclass with CLI arguments for axolotl evaluate command.\n\n\n\ncli.args.InferenceCliArgs(self, prompter=None)\nDataclass with CLI arguments for axolotl inference command.\n\n\n\ncli.args.PreprocessCliArgs(\n self,\n debug=False,\n debug_text_only=False,\n debug_num_examples=1,\n prompter=None,\n download=True,\n iterable=None,\n)\nDataclass with CLI arguments for axolotl preprocess command.\n\n\n\ncli.args.QuantizeCliArgs(\n self,\n base_model=None,\n weight_dtype=None,\n activation_dtype=None,\n quantize_embedding=None,\n group_size=None,\n output_dir=None,\n)\nDataclass with CLI arguments for axolotl quantize command.\n\n\n\ncli.args.TrainerCliArgs(\n self,\n debug=False,\n 
debug_text_only=False,\n debug_num_examples=0,\n merge_lora=False,\n prompter=None,\n shard=False,\n main_process_port=None,\n num_processes=None,\n)\nDataclass with CLI arguments for axolotl train command.\n\n\n\ncli.args.VllmServeCliArgs(\n self,\n tensor_parallel_size=None,\n host=None,\n port=None,\n gpu_memory_utilization=None,\n dtype=None,\n max_model_len=None,\n enable_prefix_caching=None,\n serve_module=None,\n)\nDataclass with CLI arguments for axolotl vllm-serve command." + "text": "utils.schemas.enums\nEnums for Axolotl input config\n\n\n\n\n\nName\nDescription\n\n\n\n\nChatTemplate\nChat templates configuration subset\n\n\nCustomSupportedOptimizers\nCustom supported optimizers\n\n\nRLType\nRL trainer type configuration subset\n\n\nRingAttnFunc\nEnum class for supported ring-flash-attn implementations\n\n\nTorchIntDType\nTorch integer data types - getattr guards against torch < 2.6 which does not support int4\n\n\n\n\n\nutils.schemas.enums.ChatTemplate()\nChat templates configuration subset\n\n\n\nutils.schemas.enums.CustomSupportedOptimizers()\nCustom supported optimizers\n\n\n\nutils.schemas.enums.RLType()\nRL trainer type configuration subset\n\n\n\nutils.schemas.enums.RingAttnFunc()\nEnum class for supported ring-flash-attn implementations\n\n\n\nutils.schemas.enums.TorchIntDType()\nTorch integer data types - getattr guards against torch < 2.6 which does not support int4" }, { - "objectID": "docs/api/cli.args.html#classes", - "href": "docs/api/cli.args.html#classes", - "title": "cli.args", + "objectID": "docs/api/utils.schemas.enums.html#classes", + "href": "docs/api/utils.schemas.enums.html#classes", + "title": "utils.schemas.enums", "section": "", - "text": "Name\nDescription\n\n\n\n\nEvaluateCliArgs\nDataclass with CLI arguments for axolotl evaluate command.\n\n\nInferenceCliArgs\nDataclass with CLI arguments for axolotl inference command.\n\n\nPreprocessCliArgs\nDataclass with CLI arguments for axolotl preprocess 
command.\n\n\nQuantizeCliArgs\nDataclass with CLI arguments for axolotl quantize command.\n\n\nTrainerCliArgs\nDataclass with CLI arguments for axolotl train command.\n\n\nVllmServeCliArgs\nDataclass with CLI arguments for axolotl vllm-serve command.\n\n\n\n\n\ncli.args.EvaluateCliArgs(\n self,\n debug=False,\n debug_text_only=False,\n debug_num_examples=0,\n)\nDataclass with CLI arguments for axolotl evaluate command.\n\n\n\ncli.args.InferenceCliArgs(self, prompter=None)\nDataclass with CLI arguments for axolotl inference command.\n\n\n\ncli.args.PreprocessCliArgs(\n self,\n debug=False,\n debug_text_only=False,\n debug_num_examples=1,\n prompter=None,\n download=True,\n iterable=None,\n)\nDataclass with CLI arguments for axolotl preprocess command.\n\n\n\ncli.args.QuantizeCliArgs(\n self,\n base_model=None,\n weight_dtype=None,\n activation_dtype=None,\n quantize_embedding=None,\n group_size=None,\n output_dir=None,\n)\nDataclass with CLI arguments for axolotl quantize command.\n\n\n\ncli.args.TrainerCliArgs(\n self,\n debug=False,\n debug_text_only=False,\n debug_num_examples=0,\n merge_lora=False,\n prompter=None,\n shard=False,\n main_process_port=None,\n num_processes=None,\n)\nDataclass with CLI arguments for axolotl train command.\n\n\n\ncli.args.VllmServeCliArgs(\n self,\n tensor_parallel_size=None,\n host=None,\n port=None,\n gpu_memory_utilization=None,\n dtype=None,\n max_model_len=None,\n enable_prefix_caching=None,\n serve_module=None,\n)\nDataclass with CLI arguments for axolotl vllm-serve command." 
+ "text": "Name\nDescription\n\n\n\n\nChatTemplate\nChat templates configuration subset\n\n\nCustomSupportedOptimizers\nCustom supported optimizers\n\n\nRLType\nRL trainer type configuration subset\n\n\nRingAttnFunc\nEnum class for supported ring-flash-attn implementations\n\n\nTorchIntDType\nTorch integer data types - getattr guards against torch < 2.6 which does not support int4\n\n\n\n\n\nutils.schemas.enums.ChatTemplate()\nChat templates configuration subset\n\n\n\nutils.schemas.enums.CustomSupportedOptimizers()\nCustom supported optimizers\n\n\n\nutils.schemas.enums.RLType()\nRL trainer type configuration subset\n\n\n\nutils.schemas.enums.RingAttnFunc()\nEnum class for supported ring-flash-attn implementations\n\n\n\nutils.schemas.enums.TorchIntDType()\nTorch integer data types - getattr guards against torch < 2.6 which does not support int4" }, { - "objectID": "docs/api/monkeypatch.llama_attn_hijack_xformers.html", - "href": "docs/api/monkeypatch.llama_attn_hijack_xformers.html", - "title": "monkeypatch.llama_attn_hijack_xformers", + "objectID": "docs/api/monkeypatch.stablelm_attn_hijack_flash.html", + "href": "docs/api/monkeypatch.stablelm_attn_hijack_flash.html", + "title": "monkeypatch.stablelm_attn_hijack_flash", "section": "", - "text": "monkeypatch.llama_attn_hijack_xformers\nmonkeypatch.llama_attn_hijack_xformers\nDirectly copied the code from https://raw.githubusercontent.com/oobabooga/text-generation-webui/main/modules/llama_attn_hijack.py and made some adjustments" + "text": "monkeypatch.stablelm_attn_hijack_flash\nPyTorch StableLM Epoch model.\n\n\n\n\n\nName\nDescription\n\n\n\n\nrepeat_kv\nThis is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,\n\n\nrotate_half\nRotates half the hidden dims of the input.\n\n\n\n\n\nmonkeypatch.stablelm_attn_hijack_flash.repeat_kv(hidden_states, n_rep)\nThis is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch,\nnum_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)\n\n\n\nmonkeypatch.stablelm_attn_hijack_flash.rotate_half(x)\nRotates half the hidden dims of the input." }, { - "objectID": "docs/api/prompt_strategies.dpo.chat_template.html", - "href": "docs/api/prompt_strategies.dpo.chat_template.html", - "title": "prompt_strategies.dpo.chat_template", + "objectID": "docs/api/monkeypatch.stablelm_attn_hijack_flash.html#functions", + "href": "docs/api/monkeypatch.stablelm_attn_hijack_flash.html#functions", + "title": "monkeypatch.stablelm_attn_hijack_flash", "section": "", - "text": "prompt_strategies.dpo.chat_template\nprompt_strategies.dpo.chat_template\nDPO prompt strategies for using tokenizer chat templates." + "text": "Name\nDescription\n\n\n\n\nrepeat_kv\nThis is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,\n\n\nrotate_half\nRotates half the hidden dims of the input.\n\n\n\n\n\nmonkeypatch.stablelm_attn_hijack_flash.repeat_kv(hidden_states, n_rep)\nThis is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,\nnum_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)\n\n\n\nmonkeypatch.stablelm_attn_hijack_flash.rotate_half(x)\nRotates half the hidden dims of the input." 
}, { - "objectID": "docs/api/models.mamba.modeling_mamba.html", - "href": "docs/api/models.mamba.modeling_mamba.html", - "title": "models.mamba.modeling_mamba", + "objectID": "docs/api/utils.callbacks.qat.html", + "href": "docs/api/utils.callbacks.qat.html", + "title": "utils.callbacks.qat", "section": "", - "text": "models.mamba.modeling_mamba\nmodels.mamba.modeling_mamba" + "text": "utils.callbacks.qat\nQAT Callback for HF Causal Trainer\n\n\n\n\n\nName\nDescription\n\n\n\n\nQATCallback\nCallback to toggle fake quantization for the model.\n\n\n\n\n\nutils.callbacks.qat.QATCallback(cfg)\nCallback to toggle fake quantization for the model.\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\ntoggle_fake_quant\nToggle fake quantization for any fake quantized linear or embedding layers in the model.\n\n\n\n\n\nutils.callbacks.qat.toggle_fake_quant(mod, enable)\nToggle fake quantization for any fake quantized linear or embedding layers in the model.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nmod\nnn.Module\nThe module to toggle fake quantization for.\nrequired\n\n\nenable\nbool\nWhether to enable or disable fake quantization.\nrequired" + }, + { + "objectID": "docs/api/utils.callbacks.qat.html#classes", + "href": "docs/api/utils.callbacks.qat.html#classes", + "title": "utils.callbacks.qat", + "section": "", + "text": "Name\nDescription\n\n\n\n\nQATCallback\nCallback to toggle fake quantization for the model.\n\n\n\n\n\nutils.callbacks.qat.QATCallback(cfg)\nCallback to toggle fake quantization for the model." 
+ }, + { + "objectID": "docs/api/utils.callbacks.qat.html#functions", + "href": "docs/api/utils.callbacks.qat.html#functions", + "title": "utils.callbacks.qat", + "section": "", + "text": "Name\nDescription\n\n\n\n\ntoggle_fake_quant\nToggle fake quantization for any fake quantized linear or embedding layers in the model.\n\n\n\n\n\nutils.callbacks.qat.toggle_fake_quant(mod, enable)\nToggle fake quantization for any fake quantized linear or embedding layers in the model.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\nmod\nnn.Module\nThe module to toggle fake quantization for.\nrequired\n\n\nenable\nbool\nWhether to enable or disable fake quantization.\nrequired" + }, + { + "objectID": "docs/api/train.html", + "href": "docs/api/train.html", + "title": "train", + "section": "", + "text": "train\nPrepare and train a model on a dataset. Can also infer from a model or merge lora\n\n\n\n\n\nName\nDescription\n\n\n\n\ncreate_model_card\nCreate a model card for the trained model if needed.\n\n\ndetermine_resume_checkpoint\nDetermine the checkpoint to resume from based on configuration.\n\n\nexecute_training\nExecute the training process with appropriate SDP kernel configurations.\n\n\nhandle_untrained_tokens_fix\nApply fixes for untrained tokens if configured.\n\n\nsave_initial_configs\nSave initial configurations before training.\n\n\nsave_trained_model\nSave the trained model according to configuration and training setup.\n\n\nsetup_model_and_tokenizer\nLoad the tokenizer, processor (for multimodal models), and model based on configuration.\n\n\nsetup_model_and_trainer\nLoad model, tokenizer, trainer, etc. 
Helper function to encapsulate the full\n\n\nsetup_model_card\nSet up the Axolotl badge and add the Axolotl config to the model card if available.\n\n\nsetup_reference_model\nSet up the reference model for RL training if needed.\n\n\nsetup_signal_handler\nSet up signal handler for graceful termination.\n\n\ntrain\nTrain a model on the given dataset.\n\n\n\n\n\ntrain.create_model_card(cfg, trainer)\nCreate a model card for the trained model if needed.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object with model card creation capabilities.\nrequired\n\n\n\n\n\n\n\ntrain.determine_resume_checkpoint(cfg)\nDetermine the checkpoint to resume from based on configuration.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nstr | None\nPath to the checkpoint to resume from, or None if not resuming.\n\n\n\n\n\n\n\ntrain.execute_training(cfg, trainer, resume_from_checkpoint)\nExecute the training process with appropriate SDP kernel configurations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntrainer\nAny\nThe configured trainer object.\nrequired\n\n\nresume_from_checkpoint\nstr | None\nPath to checkpoint to resume from, if applicable.\nrequired\n\n\n\n\n\n\n\ntrain.handle_untrained_tokens_fix(\n cfg,\n model,\n tokenizer,\n train_dataset,\n safe_serialization,\n)\nApply fixes for untrained tokens if configured.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\nmodel\nPreTrainedModel\nThe model to apply fixes to.\nrequired\n\n\ntokenizer\nPreTrainedTokenizer\nThe tokenizer for token 
identification.\nrequired\n\n\ntrain_dataset\nDataset\nThe training dataset to use.\nrequired\n\n\nsafe_serialization\nbool\nWhether to use safe serialization when saving.\nrequired\n\n\n\n\n\n\n\ntrain.save_initial_configs(cfg, tokenizer, model, peft_config, processor)\nSave initial configurations before training.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntokenizer\nPreTrainedTokenizer\nThe tokenizer to save.\nrequired\n\n\nmodel\nPreTrainedModel\nThe model to save configuration for.\nrequired\n\n\npeft_config\nPeftConfig | None\nThe PEFT configuration to save if applicable.\nrequired\n\n\n\n\n\n\n\ntrain.save_trained_model(cfg, trainer, model, safe_serialization)\nSave the trained model according to configuration and training setup.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntrainer\nAny\nThe trainer object.\nrequired\n\n\nmodel\nPreTrainedModel\nThe trained model to save.\nrequired\n\n\nsafe_serialization\nbool\nWhether to use safe serialization.\nrequired\n\n\n\n\n\n\n\ntrain.setup_model_and_tokenizer(cfg)\nLoad the tokenizer, processor (for multimodal models), and model based on configuration.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[PreTrainedModel, PreTrainedTokenizer, PeftConfig | None, ProcessorMixin | None]\nTuple containing model, tokenizer, peft_config (if LoRA / QLoRA, else None), and processor (if multimodal, else None).\n\n\n\n\n\n\n\ntrain.setup_model_and_trainer(cfg, dataset_meta)\nLoad model, tokenizer, trainer, etc. 
Helper function to encapsulate the full\ntrainer setup.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration dictionary with training parameters.\nrequired\n\n\ndataset_meta\nTrainDatasetMeta\nObject with training, validation datasets and metadata.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[HFRLTrainerBuilder | HFCausalTrainerBuilder, PeftModel | PreTrainedModel, PreTrainedTokenizer, PeftConfig | None, ProcessorMixin | None]\nTuple of: - Trainer (Causal or RLHF) - Model - Tokenizer - PEFT config - Processor\n\n\n\n\n\n\n\ntrain.setup_model_card(cfg)\nSet up the Axolotl badge and add the Axolotl config to the model card if available.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\ntrain.setup_reference_model(cfg, tokenizer)\nSet up the reference model for RL training if needed.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntokenizer\nPreTrainedTokenizer\nThe tokenizer to use for the reference model.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nPreTrainedModel | None\nReference model if needed for RL training, None otherwise.\n\n\n\n\n\n\n\ntrain.setup_signal_handler(cfg, model, safe_serialization)\nSet up signal handler for graceful termination.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\nmodel\nPreTrainedModel\nThe model to save on termination\nrequired\n\n\nsafe_serialization\nbool\nWhether to use safe serialization when saving\nrequired\n\n\n\n\n\n\n\ntrain.train(cfg, dataset_meta)\nTrain a model on the given dataset.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration dictionary with training 
parameters\nrequired\n\n\ndataset_meta\nTrainDatasetMeta\nObject with training, validation datasets and metadata\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[PeftModel | PreTrainedModel, PreTrainedTokenizer, Trainer]\nTuple of (model, tokenizer) after training" + }, + { + "objectID": "docs/api/train.html#functions", + "href": "docs/api/train.html#functions", + "title": "train", + "section": "", + "text": "Name\nDescription\n\n\n\n\ncreate_model_card\nCreate a model card for the trained model if needed.\n\n\ndetermine_resume_checkpoint\nDetermine the checkpoint to resume from based on configuration.\n\n\nexecute_training\nExecute the training process with appropriate SDP kernel configurations.\n\n\nhandle_untrained_tokens_fix\nApply fixes for untrained tokens if configured.\n\n\nsave_initial_configs\nSave initial configurations before training.\n\n\nsave_trained_model\nSave the trained model according to configuration and training setup.\n\n\nsetup_model_and_tokenizer\nLoad the tokenizer, processor (for multimodal models), and model based on configuration.\n\n\nsetup_model_and_trainer\nLoad model, tokenizer, trainer, etc. 
Helper function to encapsulate the full\n\n\nsetup_model_card\nSet up the Axolotl badge and add the Axolotl config to the model card if available.\n\n\nsetup_reference_model\nSet up the reference model for RL training if needed.\n\n\nsetup_signal_handler\nSet up signal handler for graceful termination.\n\n\ntrain\nTrain a model on the given dataset.\n\n\n\n\n\ntrain.create_model_card(cfg, trainer)\nCreate a model card for the trained model if needed.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntrainer\nTrainer\nThe trainer object with model card creation capabilities.\nrequired\n\n\n\n\n\n\n\ntrain.determine_resume_checkpoint(cfg)\nDetermine the checkpoint to resume from based on configuration.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nstr | None\nPath to the checkpoint to resume from, or None if not resuming.\n\n\n\n\n\n\n\ntrain.execute_training(cfg, trainer, resume_from_checkpoint)\nExecute the training process with appropriate SDP kernel configurations.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntrainer\nAny\nThe configured trainer object.\nrequired\n\n\nresume_from_checkpoint\nstr | None\nPath to checkpoint to resume from, if applicable.\nrequired\n\n\n\n\n\n\n\ntrain.handle_untrained_tokens_fix(\n cfg,\n model,\n tokenizer,\n train_dataset,\n safe_serialization,\n)\nApply fixes for untrained tokens if configured.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\nmodel\nPreTrainedModel\nThe model to apply fixes to.\nrequired\n\n\ntokenizer\nPreTrainedTokenizer\nThe tokenizer for token 
identification.\nrequired\n\n\ntrain_dataset\nDataset\nThe training dataset to use.\nrequired\n\n\nsafe_serialization\nbool\nWhether to use safe serialization when saving.\nrequired\n\n\n\n\n\n\n\ntrain.save_initial_configs(cfg, tokenizer, model, peft_config, processor)\nSave initial configurations before training.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntokenizer\nPreTrainedTokenizer\nThe tokenizer to save.\nrequired\n\n\nmodel\nPreTrainedModel\nThe model to save configuration for.\nrequired\n\n\npeft_config\nPeftConfig | None\nThe PEFT configuration to save if applicable.\nrequired\n\n\n\n\n\n\n\ntrain.save_trained_model(cfg, trainer, model, safe_serialization)\nSave the trained model according to configuration and training setup.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntrainer\nAny\nThe trainer object.\nrequired\n\n\nmodel\nPreTrainedModel\nThe trained model to save.\nrequired\n\n\nsafe_serialization\nbool\nWhether to use safe serialization.\nrequired\n\n\n\n\n\n\n\ntrain.setup_model_and_tokenizer(cfg)\nLoad the tokenizer, processor (for multimodal models), and model based on configuration.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[PreTrainedModel, PreTrainedTokenizer, PeftConfig | None, ProcessorMixin | None]\nTuple containing model, tokenizer, peft_config (if LoRA / QLoRA, else None), and processor (if multimodal, else None).\n\n\n\n\n\n\n\ntrain.setup_model_and_trainer(cfg, dataset_meta)\nLoad model, tokenizer, trainer, etc. 
Helper function to encapsulate the full\ntrainer setup.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration dictionary with training parameters.\nrequired\n\n\ndataset_meta\nTrainDatasetMeta\nObject with training, validation datasets and metadata.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[HFRLTrainerBuilder | HFCausalTrainerBuilder, PeftModel | PreTrainedModel, PreTrainedTokenizer, PeftConfig | None, ProcessorMixin | None]\nTuple of: - Trainer (Causal or RLHF) - Model - Tokenizer - PEFT config - Processor\n\n\n\n\n\n\n\ntrain.setup_model_card(cfg)\nSet up the Axolotl badge and add the Axolotl config to the model card if available.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\n\n\n\n\n\ntrain.setup_reference_model(cfg, tokenizer)\nSet up the reference model for RL training if needed.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ntokenizer\nPreTrainedTokenizer\nThe tokenizer to use for the reference model.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nPreTrainedModel | None\nReference model if needed for RL training, None otherwise.\n\n\n\n\n\n\n\ntrain.setup_signal_handler(cfg, model, safe_serialization)\nSet up signal handler for graceful termination.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\nmodel\nPreTrainedModel\nThe model to save on termination\nrequired\n\n\nsafe_serialization\nbool\nWhether to use safe serialization when saving\nrequired\n\n\n\n\n\n\n\ntrain.train(cfg, dataset_meta)\nTrain a model on the given dataset.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nThe configuration dictionary with training 
parameters\nrequired\n\n\ndataset_meta\nTrainDatasetMeta\nObject with training, validation datasets and metadata\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[PeftModel | PreTrainedModel, PreTrainedTokenizer, Trainer]\nTuple of (model, tokenizer) after training" }, { "objectID": "docs/api/prompt_strategies.messages.chat.html", "href": "docs/api/prompt_strategies.messages.chat.html", "title": "prompt_strategies.messages.chat", "section": "", - "text": "prompt_strategies.messages.chat\nChat dataset wrapping strategy for new internal messages representations\n\n\n\n\n\nName\nDescription\n\n\n\n\nChatMessageDatasetWrappingStrategy\nChat dataset wrapping strategy for new internal messages representations\n\n\n\n\n\nprompt_strategies.messages.chat.ChatMessageDatasetWrappingStrategy(\n self,\n processor,\n message_transform=None,\n formatter=None,\n **kwargs,\n)\nChat dataset wrapping strategy for new internal messages representations" + "text": "prompt_strategies.messages.chat\nChat dataset wrapping strategy for new internal messages representations\n\n\n\n\n\nName\nDescription\n\n\n\n\nChatMessageDatasetWrappingStrategy\nChat dataset wrapping strategy for new internal messages representations\n\n\n\n\n\nprompt_strategies.messages.chat.ChatMessageDatasetWrappingStrategy(\n processor,\n message_transform=None,\n formatter=None,\n **kwargs,\n)\nChat dataset wrapping strategy for new internal messages representations" }, { "objectID": "docs/api/prompt_strategies.messages.chat.html#classes", "href": "docs/api/prompt_strategies.messages.chat.html#classes", "title": "prompt_strategies.messages.chat", "section": "", - "text": "Name\nDescription\n\n\n\n\nChatMessageDatasetWrappingStrategy\nChat dataset wrapping strategy for new internal messages representations\n\n\n\n\n\nprompt_strategies.messages.chat.ChatMessageDatasetWrappingStrategy(\n self,\n processor,\n message_transform=None,\n formatter=None,\n **kwargs,\n)\nChat dataset wrapping 
strategy for new internal messages representations" + "text": "Name\nDescription\n\n\n\n\nChatMessageDatasetWrappingStrategy\nChat dataset wrapping strategy for new internal messages representations\n\n\n\n\n\nprompt_strategies.messages.chat.ChatMessageDatasetWrappingStrategy(\n processor,\n message_transform=None,\n formatter=None,\n **kwargs,\n)\nChat dataset wrapping strategy for new internal messages representations" }, { "objectID": "docs/api/monkeypatch.llama_patch_multipack.html", @@ -2549,14 +2570,14 @@ "href": "docs/api/loaders.patch_manager.html", "title": "loaders.patch_manager", "section": "", - "text": "loaders.patch_manager\nPatch manager class implementation to complement axolotl.loaders.ModelLoader.\nApplies pre- and post-model load patches for various fixes and optimizations.\n\n\n\n\n\nName\nDescription\n\n\n\n\nPatchManager\nManages the application of patches during the model loading process.\n\n\n\n\n\nloaders.patch_manager.PatchManager(self, cfg, model_config, inference=False)\nManages the application of patches during the model loading process.\n\n\n\n\n\nName\nDescription\n\n\n\n\nhas_flash_attn\nCheck if flash attention is installed.\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\napply_post_model_load_patches\nApply patches that require the model instance.\n\n\napply_pre_model_load_patches\nApply pre-model load patches based on config.\n\n\n\n\n\nloaders.patch_manager.PatchManager.apply_post_model_load_patches(model)\nApply patches that require the model instance.\n\n\n\nloaders.patch_manager.PatchManager.apply_pre_model_load_patches()\nApply pre-model load patches based on config." 
+ "text": "loaders.patch_manager\nPatch manager class implementation to complement axolotl.loaders.ModelLoader.\nApplies pre- and post-model load patches for various fixes and optimizations.\n\n\n\n\n\nName\nDescription\n\n\n\n\nPatchManager\nManages the application of patches during the model loading process.\n\n\n\n\n\nloaders.patch_manager.PatchManager(cfg, model_config, inference=False)\nManages the application of patches during the model loading process.\n\n\n\n\n\nName\nDescription\n\n\n\n\nhas_flash_attn\nCheck if flash attention is installed.\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\napply_post_model_load_patches\nApply patches that require the model instance.\n\n\napply_pre_model_load_patches\nApply pre-model load patches based on config.\n\n\n\n\n\nloaders.patch_manager.PatchManager.apply_post_model_load_patches(model)\nApply patches that require the model instance.\n\n\n\nloaders.patch_manager.PatchManager.apply_pre_model_load_patches()\nApply pre-model load patches based on config." 
}, { "objectID": "docs/api/loaders.patch_manager.html#classes", "href": "docs/api/loaders.patch_manager.html#classes", "title": "loaders.patch_manager", "section": "", - "text": "Name\nDescription\n\n\n\n\nPatchManager\nManages the application of patches during the model loading process.\n\n\n\n\n\nloaders.patch_manager.PatchManager(self, cfg, model_config, inference=False)\nManages the application of patches during the model loading process.\n\n\n\n\n\nName\nDescription\n\n\n\n\nhas_flash_attn\nCheck if flash attention is installed.\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\napply_post_model_load_patches\nApply patches that require the model instance.\n\n\napply_pre_model_load_patches\nApply pre-model load patches based on config.\n\n\n\n\n\nloaders.patch_manager.PatchManager.apply_post_model_load_patches(model)\nApply patches that require the model instance.\n\n\n\nloaders.patch_manager.PatchManager.apply_pre_model_load_patches()\nApply pre-model load patches based on config." + "text": "Name\nDescription\n\n\n\n\nPatchManager\nManages the application of patches during the model loading process.\n\n\n\n\n\nloaders.patch_manager.PatchManager(cfg, model_config, inference=False)\nManages the application of patches during the model loading process.\n\n\n\n\n\nName\nDescription\n\n\n\n\nhas_flash_attn\nCheck if flash attention is installed.\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\napply_post_model_load_patches\nApply patches that require the model instance.\n\n\napply_pre_model_load_patches\nApply pre-model load patches based on config.\n\n\n\n\n\nloaders.patch_manager.PatchManager.apply_post_model_load_patches(model)\nApply patches that require the model instance.\n\n\n\nloaders.patch_manager.PatchManager.apply_pre_model_load_patches()\nApply pre-model load patches based on config." 
}, { "objectID": "docs/api/utils.schemas.peft.html", @@ -2584,14 +2605,14 @@ "href": "docs/api/core.trainers.grpo.trainer.html", "title": "core.trainers.grpo.trainer", "section": "", - "text": "core.trainers.grpo.trainer\nAxolotl GRPO trainers (with and without sequence parallelism handling)\n\n\n\n\n\nName\nDescription\n\n\n\n\nAxolotlGRPOSequenceParallelTrainer\nExtend the base GRPOTrainer for sequence parallelism handling\n\n\nAxolotlGRPOTrainer\nExtend the base GRPOTrainer for axolotl helpers\n\n\n\n\n\ncore.trainers.grpo.trainer.AxolotlGRPOSequenceParallelTrainer(\n self,\n model,\n reward_funcs,\n args=None,\n train_dataset=None,\n eval_dataset=None,\n processing_class=None,\n reward_processing_classes=None,\n callbacks=None,\n optimizers=(None, None),\n peft_config=None,\n)\nExtend the base GRPOTrainer for sequence parallelism handling\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_train_dataloader\nGet dataloader for training\n\n\n\n\n\ncore.trainers.grpo.trainer.AxolotlGRPOSequenceParallelTrainer.get_train_dataloader(\n)\nGet dataloader for training\n\n\n\n\n\ncore.trainers.grpo.trainer.AxolotlGRPOTrainer()\nExtend the base GRPOTrainer for axolotl helpers" + "text": "core.trainers.grpo.trainer\nAxolotl GRPO trainers (with and without sequence parallelism handling)\n\n\n\n\n\nName\nDescription\n\n\n\n\nAxolotlGRPOSequenceParallelTrainer\nExtend the base GRPOTrainer for sequence parallelism handling\n\n\nAxolotlGRPOTrainer\nExtend the base GRPOTrainer for axolotl helpers\n\n\n\n\n\ncore.trainers.grpo.trainer.AxolotlGRPOSequenceParallelTrainer(\n model,\n reward_funcs,\n args=None,\n train_dataset=None,\n eval_dataset=None,\n processing_class=None,\n reward_processing_classes=None,\n callbacks=None,\n optimizers=(None, None),\n peft_config=None,\n optimizer_cls_and_kwargs=None,\n)\nExtend the base GRPOTrainer for sequence parallelism handling\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_train_dataloader\nGet dataloader for 
training\n\n\n\n\n\ncore.trainers.grpo.trainer.AxolotlGRPOSequenceParallelTrainer.get_train_dataloader(\n)\nGet dataloader for training\n\n\n\n\n\ncore.trainers.grpo.trainer.AxolotlGRPOTrainer(*args, **kwargs)\nExtend the base GRPOTrainer for axolotl helpers" }, { "objectID": "docs/api/core.trainers.grpo.trainer.html#classes", "href": "docs/api/core.trainers.grpo.trainer.html#classes", "title": "core.trainers.grpo.trainer", "section": "", - "text": "Name\nDescription\n\n\n\n\nAxolotlGRPOSequenceParallelTrainer\nExtend the base GRPOTrainer for sequence parallelism handling\n\n\nAxolotlGRPOTrainer\nExtend the base GRPOTrainer for axolotl helpers\n\n\n\n\n\ncore.trainers.grpo.trainer.AxolotlGRPOSequenceParallelTrainer(\n self,\n model,\n reward_funcs,\n args=None,\n train_dataset=None,\n eval_dataset=None,\n processing_class=None,\n reward_processing_classes=None,\n callbacks=None,\n optimizers=(None, None),\n peft_config=None,\n)\nExtend the base GRPOTrainer for sequence parallelism handling\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_train_dataloader\nGet dataloader for training\n\n\n\n\n\ncore.trainers.grpo.trainer.AxolotlGRPOSequenceParallelTrainer.get_train_dataloader(\n)\nGet dataloader for training\n\n\n\n\n\ncore.trainers.grpo.trainer.AxolotlGRPOTrainer()\nExtend the base GRPOTrainer for axolotl helpers" + "text": "Name\nDescription\n\n\n\n\nAxolotlGRPOSequenceParallelTrainer\nExtend the base GRPOTrainer for sequence parallelism handling\n\n\nAxolotlGRPOTrainer\nExtend the base GRPOTrainer for axolotl helpers\n\n\n\n\n\ncore.trainers.grpo.trainer.AxolotlGRPOSequenceParallelTrainer(\n model,\n reward_funcs,\n args=None,\n train_dataset=None,\n eval_dataset=None,\n processing_class=None,\n reward_processing_classes=None,\n callbacks=None,\n optimizers=(None, None),\n peft_config=None,\n optimizer_cls_and_kwargs=None,\n)\nExtend the base GRPOTrainer for sequence parallelism handling\n\n\n\n\n\nName\nDescription\n\n\n\n\nget_train_dataloader\nGet dataloader for 
training\n\n\n\n\n\ncore.trainers.grpo.trainer.AxolotlGRPOSequenceParallelTrainer.get_train_dataloader(\n)\nGet dataloader for training\n\n\n\n\n\ncore.trainers.grpo.trainer.AxolotlGRPOTrainer(*args, **kwargs)\nExtend the base GRPOTrainer for axolotl helpers" }, { "objectID": "docs/api/integrations.spectrum.args.html", @@ -2612,28 +2633,28 @@ "href": "docs/api/core.trainers.relora.html", "title": "core.trainers.relora", "section": "", - "text": "core.trainers.relora\nModule for ReLoRA trainer\n\n\n\n\n\nName\nDescription\n\n\n\n\nReLoRATrainer\nTrainer subclass that uses the OneCycleLR scheduler\n\n\n\n\n\ncore.trainers.relora.ReLoRATrainer(self, *args, **kwargs)\nTrainer subclass that uses the OneCycleLR scheduler" + "text": "core.trainers.relora\nModule for ReLoRA trainer\n\n\n\n\n\nName\nDescription\n\n\n\n\nReLoRATrainer\nTrainer subclass that uses the OneCycleLR scheduler\n\n\n\n\n\ncore.trainers.relora.ReLoRATrainer(*args, **kwargs)\nTrainer subclass that uses the OneCycleLR scheduler" }, { "objectID": "docs/api/core.trainers.relora.html#classes", "href": "docs/api/core.trainers.relora.html#classes", "title": "core.trainers.relora", "section": "", - "text": "Name\nDescription\n\n\n\n\nReLoRATrainer\nTrainer subclass that uses the OneCycleLR scheduler\n\n\n\n\n\ncore.trainers.relora.ReLoRATrainer(self, *args, **kwargs)\nTrainer subclass that uses the OneCycleLR scheduler" + "text": "Name\nDescription\n\n\n\n\nReLoRATrainer\nTrainer subclass that uses the OneCycleLR scheduler\n\n\n\n\n\ncore.trainers.relora.ReLoRATrainer(*args, **kwargs)\nTrainer subclass that uses the OneCycleLR scheduler" }, { "objectID": "docs/api/loaders.model.html", "href": "docs/api/loaders.model.html", "title": "loaders.model", "section": "", - "text": "loaders.model\nModel loader class implementation for loading, configuring, and patching various\nmodels.\n\n\n\n\n\nName\nDescription\n\n\n\n\nModelLoader\nManages model configuration, initialization and application of patches 
during\n\n\n\n\n\nloaders.model.ModelLoader(\n self,\n cfg,\n tokenizer,\n *,\n inference=False,\n reference_model=False,\n **kwargs,\n)\nManages model configuration, initialization and application of patches during\nmodel loading.\nThis class orchestrates the entire process of loading a model from configuration to\nfinal preparation. It handles device mapping, quantization, attention mechanisms,\nadapter integration, and various optimizations.\n\n\n\nLoading and validating model configuration\nApplying monkey patches for optimizations / fixes\nSetting up device mapping (including multi-GPU configurations)\nConfiguring quantization\nSetting attention mechanisms (Flash Attention, SDPA, etc.)\nLoading and initializing the model\nApplying adapters (LoRA, QLoRA, etc.)\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\nmodel\nPreTrainedModel | PeftModel | PeftMixedModel\nThe loaded model instance (available after load() is called).\n\n\nmodel_kwargs\ndict[str, Any]\nDictionary of keyword arguments passed to model initialization.\n\n\nbase_model\n\nName or path of the base model to load.\n\n\nmodel_type\n\nType of model to load (e.g., AutoModelForCausalLM).\n\n\nmodel_config\n\nConfiguration object for the model.\n\n\nauto_model_loader\n\nclass used for loading the model (default: AutoModelForCausalLM).\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nload\nLoad and prepare the model with all configurations and patches.\n\n\n\n\n\nloaders.model.ModelLoader.load()\nLoad and prepare the model with all configurations and patches.\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[PreTrainedModel | PeftModelForCausalLM, PeftConfig | None]\nA tuple with the loaded model and its LoRA configuration (if applicable)." 
+ "text": "loaders.model\nModel loader class implementation for loading, configuring, and patching various\nmodels.\n\n\n\n\n\nName\nDescription\n\n\n\n\nModelLoader\nManages model configuration, initialization and application of patches during\n\n\n\n\n\nloaders.model.ModelLoader(\n cfg,\n tokenizer,\n *,\n inference=False,\n reference_model=False,\n **kwargs,\n)\nManages model configuration, initialization and application of patches during\nmodel loading.\nThis class orchestrates the entire process of loading a model from configuration to\nfinal preparation. It handles device mapping, quantization, attention mechanisms,\nadapter integration, and various optimizations.\n\n\n\nLoading and validating model configuration\nApplying monkey patches for optimizations / fixes\nSetting up device mapping (including multi-GPU configurations)\nConfiguring quantization\nSetting attention mechanisms (Flash Attention, SDPA, etc.)\nLoading and initializing the model\nApplying adapters (LoRA, QLoRA, etc.)\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\nmodel\nPreTrainedModel | PeftModel | PeftMixedModel\nThe loaded model instance (available after load() is called).\n\n\nmodel_kwargs\ndict[str, Any]\nDictionary of keyword arguments passed to model initialization.\n\n\nbase_model\n\nName or path of the base model to load.\n\n\nmodel_type\n\nType of model to load (e.g., AutoModelForCausalLM).\n\n\nmodel_config\n\nConfiguration object for the model.\n\n\nauto_model_loader\n\nclass used for loading the model (default: AutoModelForCausalLM).\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nload\nLoad and prepare the model with all configurations and patches.\n\n\n\n\n\nloaders.model.ModelLoader.load()\nLoad and prepare the model with all configurations and patches.\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[PreTrainedModel | PeftModelForCausalLM, PeftConfig | None]\nA tuple with the loaded model and its LoRA configuration (if applicable)." 
}, { "objectID": "docs/api/loaders.model.html#classes", "href": "docs/api/loaders.model.html#classes", "title": "loaders.model", "section": "", - "text": "Name\nDescription\n\n\n\n\nModelLoader\nManages model configuration, initialization and application of patches during\n\n\n\n\n\nloaders.model.ModelLoader(\n self,\n cfg,\n tokenizer,\n *,\n inference=False,\n reference_model=False,\n **kwargs,\n)\nManages model configuration, initialization and application of patches during\nmodel loading.\nThis class orchestrates the entire process of loading a model from configuration to\nfinal preparation. It handles device mapping, quantization, attention mechanisms,\nadapter integration, and various optimizations.\n\n\n\nLoading and validating model configuration\nApplying monkey patches for optimizations / fixes\nSetting up device mapping (including multi-GPU configurations)\nConfiguring quantization\nSetting attention mechanisms (Flash Attention, SDPA, etc.)\nLoading and initializing the model\nApplying adapters (LoRA, QLoRA, etc.)\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\nmodel\nPreTrainedModel | PeftModel | PeftMixedModel\nThe loaded model instance (available after load() is called).\n\n\nmodel_kwargs\ndict[str, Any]\nDictionary of keyword arguments passed to model initialization.\n\n\nbase_model\n\nName or path of the base model to load.\n\n\nmodel_type\n\nType of model to load (e.g., AutoModelForCausalLM).\n\n\nmodel_config\n\nConfiguration object for the model.\n\n\nauto_model_loader\n\nclass used for loading the model (default: AutoModelForCausalLM).\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nload\nLoad and prepare the model with all configurations and patches.\n\n\n\n\n\nloaders.model.ModelLoader.load()\nLoad and prepare the model with all configurations and patches.\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[PreTrainedModel | PeftModelForCausalLM, PeftConfig | None]\nA tuple with the loaded model and its LoRA configuration (if 
applicable)." + "text": "Name\nDescription\n\n\n\n\nModelLoader\nManages model configuration, initialization and application of patches during\n\n\n\n\n\nloaders.model.ModelLoader(\n cfg,\n tokenizer,\n *,\n inference=False,\n reference_model=False,\n **kwargs,\n)\nManages model configuration, initialization and application of patches during\nmodel loading.\nThis class orchestrates the entire process of loading a model from configuration to\nfinal preparation. It handles device mapping, quantization, attention mechanisms,\nadapter integration, and various optimizations.\n\n\n\nLoading and validating model configuration\nApplying monkey patches for optimizations / fixes\nSetting up device mapping (including multi-GPU configurations)\nConfiguring quantization\nSetting attention mechanisms (Flash Attention, SDPA, etc.)\nLoading and initializing the model\nApplying adapters (LoRA, QLoRA, etc.)\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\nmodel\nPreTrainedModel | PeftModel | PeftMixedModel\nThe loaded model instance (available after load() is called).\n\n\nmodel_kwargs\ndict[str, Any]\nDictionary of keyword arguments passed to model initialization.\n\n\nbase_model\n\nName or path of the base model to load.\n\n\nmodel_type\n\nType of model to load (e.g., AutoModelForCausalLM).\n\n\nmodel_config\n\nConfiguration object for the model.\n\n\nauto_model_loader\n\nclass used for loading the model (default: AutoModelForCausalLM).\n\n\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nload\nLoad and prepare the model with all configurations and patches.\n\n\n\n\n\nloaders.model.ModelLoader.load()\nLoad and prepare the model with all configurations and patches.\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\ntuple[PreTrainedModel | PeftModelForCausalLM, PeftConfig | None]\nA tuple with the loaded model and its LoRA configuration (if applicable)." 
}, { "objectID": "docs/api/utils.bench.html", @@ -2654,14 +2675,14 @@ "href": "docs/api/prompt_strategies.input_output.html", "title": "prompt_strategies.input_output", "section": "", - "text": "prompt_strategies.input_output\nModule for plain input/output prompt pairs\n\n\n\n\n\nName\nDescription\n\n\n\n\nRawInputOutputPrompter\nprompter for raw i/o data\n\n\nRawInputOutputStrategy\nPrompt Strategy class for input/output pairs\n\n\n\n\n\nprompt_strategies.input_output.RawInputOutputPrompter()\nprompter for raw i/o data\n\n\n\nprompt_strategies.input_output.RawInputOutputStrategy(\n self,\n *args,\n eos_token=None,\n **kwargs,\n)\nPrompt Strategy class for input/output pairs" + "text": "prompt_strategies.input_output\nModule for plain input/output prompt pairs\n\n\n\n\n\nName\nDescription\n\n\n\n\nRawInputOutputPrompter\nprompter for raw i/o data\n\n\nRawInputOutputStrategy\nPrompt Strategy class for input/output pairs\n\n\n\n\n\nprompt_strategies.input_output.RawInputOutputPrompter()\nprompter for raw i/o data\n\n\n\nprompt_strategies.input_output.RawInputOutputStrategy(\n *args,\n eos_token=None,\n **kwargs,\n)\nPrompt Strategy class for input/output pairs" }, { "objectID": "docs/api/prompt_strategies.input_output.html#classes", "href": "docs/api/prompt_strategies.input_output.html#classes", "title": "prompt_strategies.input_output", "section": "", - "text": "Name\nDescription\n\n\n\n\nRawInputOutputPrompter\nprompter for raw i/o data\n\n\nRawInputOutputStrategy\nPrompt Strategy class for input/output pairs\n\n\n\n\n\nprompt_strategies.input_output.RawInputOutputPrompter()\nprompter for raw i/o data\n\n\n\nprompt_strategies.input_output.RawInputOutputStrategy(\n self,\n *args,\n eos_token=None,\n **kwargs,\n)\nPrompt Strategy class for input/output pairs" + "text": "Name\nDescription\n\n\n\n\nRawInputOutputPrompter\nprompter for raw i/o data\n\n\nRawInputOutputStrategy\nPrompt Strategy class for input/output 
pairs\n\n\n\n\n\nprompt_strategies.input_output.RawInputOutputPrompter()\nprompter for raw i/o data\n\n\n\nprompt_strategies.input_output.RawInputOutputStrategy(\n *args,\n eos_token=None,\n **kwargs,\n)\nPrompt Strategy class for input/output pairs" }, { "objectID": "docs/api/cli.inference.html", @@ -2682,28 +2703,28 @@ "href": "docs/api/core.trainers.dpo.trainer.html", "title": "core.trainers.dpo.trainer", "section": "", - "text": "core.trainers.dpo.trainer\nDPO trainer for axolotl\n\n\n\n\n\nName\nDescription\n\n\n\n\nAxolotlDPOTrainer\nExtend the base DPOTrainer for axolotl helpers.\n\n\n\n\n\ncore.trainers.dpo.trainer.AxolotlDPOTrainer(\n self,\n *args,\n dataset_tags=None,\n **kwargs,\n)\nExtend the base DPOTrainer for axolotl helpers.\n\n\n\n\n\nName\nDescription\n\n\n\n\npush_to_hub\nOverwrite the push_to_hub method in order to force-add the tags when pushing\n\n\n\n\n\ncore.trainers.dpo.trainer.AxolotlDPOTrainer.push_to_hub(*args, **kwargs)\nOverwrite the push_to_hub method in order to force-add the tags when pushing\nthe model on the Hub. Please refer to ~transformers.Trainer.push_to_hub\nfor more details." + "text": "core.trainers.dpo.trainer\nDPO trainer for axolotl\n\n\n\n\n\nName\nDescription\n\n\n\n\nAxolotlDPOTrainer\nExtend the base DPOTrainer for axolotl helpers.\n\n\n\n\n\ncore.trainers.dpo.trainer.AxolotlDPOTrainer(*args, dataset_tags=None, **kwargs)\nExtend the base DPOTrainer for axolotl helpers.\n\n\n\n\n\nName\nDescription\n\n\n\n\npush_to_hub\nOverwrite the push_to_hub method in order to force-add the tags when pushing\n\n\n\n\n\ncore.trainers.dpo.trainer.AxolotlDPOTrainer.push_to_hub(*args, **kwargs)\nOverwrite the push_to_hub method in order to force-add the tags when pushing\nthe model on the Hub. Please refer to ~transformers.Trainer.push_to_hub\nfor more details." 
}, { "objectID": "docs/api/core.trainers.dpo.trainer.html#classes", "href": "docs/api/core.trainers.dpo.trainer.html#classes", "title": "core.trainers.dpo.trainer", "section": "", - "text": "Name\nDescription\n\n\n\n\nAxolotlDPOTrainer\nExtend the base DPOTrainer for axolotl helpers.\n\n\n\n\n\ncore.trainers.dpo.trainer.AxolotlDPOTrainer(\n self,\n *args,\n dataset_tags=None,\n **kwargs,\n)\nExtend the base DPOTrainer for axolotl helpers.\n\n\n\n\n\nName\nDescription\n\n\n\n\npush_to_hub\nOverwrite the push_to_hub method in order to force-add the tags when pushing\n\n\n\n\n\ncore.trainers.dpo.trainer.AxolotlDPOTrainer.push_to_hub(*args, **kwargs)\nOverwrite the push_to_hub method in order to force-add the tags when pushing\nthe model on the Hub. Please refer to ~transformers.Trainer.push_to_hub\nfor more details." + "text": "Name\nDescription\n\n\n\n\nAxolotlDPOTrainer\nExtend the base DPOTrainer for axolotl helpers.\n\n\n\n\n\ncore.trainers.dpo.trainer.AxolotlDPOTrainer(*args, dataset_tags=None, **kwargs)\nExtend the base DPOTrainer for axolotl helpers.\n\n\n\n\n\nName\nDescription\n\n\n\n\npush_to_hub\nOverwrite the push_to_hub method in order to force-add the tags when pushing\n\n\n\n\n\ncore.trainers.dpo.trainer.AxolotlDPOTrainer.push_to_hub(*args, **kwargs)\nOverwrite the push_to_hub method in order to force-add the tags when pushing\nthe model on the Hub. Please refer to ~transformers.Trainer.push_to_hub\nfor more details." 
}, { "objectID": "docs/api/monkeypatch.gradient_checkpointing.offload_disk.html", "href": "docs/api/monkeypatch.gradient_checkpointing.offload_disk.html", "title": "monkeypatch.gradient_checkpointing.offload_disk", "section": "", - "text": "monkeypatch.gradient_checkpointing.offload_disk\nDISCO - DIsk-based Storage and Checkpointing with Optimized prefetching\n\n\n\n\n\nName\nDescription\n\n\n\n\nDisco\nDisco: DIsk-based Storage and Checkpointing with Optimized prefetching\n\n\nDiskOffloadManager\nManages offloaded tensors and handles prefetching in a separate thread.\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.Disco()\nDisco: DIsk-based Storage and Checkpointing with Optimized prefetching\nAdvanced disk-based gradient checkpointer with prefetching.\n\n\n\n\n\nName\nDescription\n\n\n\n\nbackward\nBackward pass that loads activations from disk with prefetching\n\n\nforward\nForward pass that offloads activations to disk asynchronously\n\n\nget_instance\nGet or create the offload manager\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.Disco.backward(\n ctx,\n *grad_outputs,\n)\nBackward pass that loads activations from disk with prefetching\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.Disco.forward(\n ctx,\n forward_function,\n hidden_states,\n *args,\n prefetch_size=1,\n prefetch_to_gpu=True,\n save_workers=4,\n)\nForward pass that offloads activations to disk asynchronously\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.Disco.get_instance(\n prefetch_size=1,\n prefetch_to_gpu=True,\n save_workers=4,\n)\nGet or create the offload manager\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager(\n self,\n prefetch_size=3,\n prefetch_to_gpu=True,\n save_workers=4,\n)\nManages offloaded tensors and handles prefetching in a separate thread.\nIncludes synchronization to prevent race conditions.\n\n\n\n\n\nName\nDescription\n\n\n\n\ncleanup\nClean up all temp files and stop prefetch thread with proper 
synchronization\n\n\ncleanup_tensor\nClean up a specific tensor file after it’s been used\n\n\nload_tensor\nLoad tensor from disk or prefetch cache with proper synchronization\n\n\nsave_tensor\nSave tensor to disk asynchronously and return file path with thread-safe operations\n\n\ntrigger_prefetch\nTrigger prefetching of the next N tensors with proper synchronization\n\n\nwait_for_save\nWait for a tensor to be saved to disk\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.cleanup()\nClean up all temp files and stop prefetch thread with proper synchronization\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.cleanup_tensor(\n file_path,\n)\nClean up a specific tensor file after it’s been used\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.load_tensor(\n file_path,\n target_device='cuda',\n)\nLoad tensor from disk or prefetch cache with proper synchronization\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.save_tensor(\n tensor,\n)\nSave tensor to disk asynchronously and return file path with thread-safe operations\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.trigger_prefetch(\n n=None,\n)\nTrigger prefetching of the next N tensors with proper synchronization\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.wait_for_save(\n file_path,\n timeout=None,\n)\nWait for a tensor to be saved to disk" + "text": "monkeypatch.gradient_checkpointing.offload_disk\nDISCO - DIsk-based Storage and Checkpointing with Optimized prefetching\n\n\n\n\n\nName\nDescription\n\n\n\n\nDisco\nDisco: DIsk-based Storage and Checkpointing with Optimized prefetching\n\n\nDiskOffloadManager\nManages offloaded tensors and handles prefetching in a separate thread.\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.Disco()\nDisco: DIsk-based Storage and Checkpointing with Optimized prefetching\nAdvanced disk-based gradient checkpointer 
with prefetching.\n\n\n\n\n\nName\nDescription\n\n\n\n\nbackward\nBackward pass that loads activations from disk with prefetching\n\n\nforward\nForward pass that offloads activations to disk asynchronously\n\n\nget_instance\nGet or create the offload manager\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.Disco.backward(\n ctx,\n *grad_outputs,\n)\nBackward pass that loads activations from disk with prefetching\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.Disco.forward(\n ctx,\n forward_function,\n hidden_states,\n *args,\n prefetch_size=1,\n prefetch_to_gpu=True,\n save_workers=4,\n)\nForward pass that offloads activations to disk asynchronously\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.Disco.get_instance(\n prefetch_size=1,\n prefetch_to_gpu=True,\n save_workers=4,\n)\nGet or create the offload manager\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager(\n prefetch_size=3,\n prefetch_to_gpu=True,\n save_workers=4,\n)\nManages offloaded tensors and handles prefetching in a separate thread.\nIncludes synchronization to prevent race conditions.\n\n\n\n\n\nName\nDescription\n\n\n\n\ncleanup\nClean up all temp files and stop prefetch thread with proper synchronization\n\n\ncleanup_tensor\nClean up a specific tensor file after it’s been used\n\n\nload_tensor\nLoad tensor from disk or prefetch cache with proper synchronization\n\n\nsave_tensor\nSave tensor to disk asynchronously and return file path with thread-safe operations\n\n\ntrigger_prefetch\nTrigger prefetching of the next N tensors with proper synchronization\n\n\nwait_for_save\nWait for a tensor to be saved to disk\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.cleanup()\nClean up all temp files and stop prefetch thread with proper synchronization\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.cleanup_tensor(\n file_path,\n)\nClean up a specific tensor file after it’s been 
used\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.load_tensor(\n file_path,\n target_device='cuda',\n)\nLoad tensor from disk or prefetch cache with proper synchronization\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.save_tensor(\n tensor,\n)\nSave tensor to disk asynchronously and return file path with thread-safe operations\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.trigger_prefetch(\n n=None,\n)\nTrigger prefetching of the next N tensors with proper synchronization\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.wait_for_save(\n file_path,\n timeout=None,\n)\nWait for a tensor to be saved to disk" }, { "objectID": "docs/api/monkeypatch.gradient_checkpointing.offload_disk.html#classes", "href": "docs/api/monkeypatch.gradient_checkpointing.offload_disk.html#classes", "title": "monkeypatch.gradient_checkpointing.offload_disk", "section": "", - "text": "Name\nDescription\n\n\n\n\nDisco\nDisco: DIsk-based Storage and Checkpointing with Optimized prefetching\n\n\nDiskOffloadManager\nManages offloaded tensors and handles prefetching in a separate thread.\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.Disco()\nDisco: DIsk-based Storage and Checkpointing with Optimized prefetching\nAdvanced disk-based gradient checkpointer with prefetching.\n\n\n\n\n\nName\nDescription\n\n\n\n\nbackward\nBackward pass that loads activations from disk with prefetching\n\n\nforward\nForward pass that offloads activations to disk asynchronously\n\n\nget_instance\nGet or create the offload manager\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.Disco.backward(\n ctx,\n *grad_outputs,\n)\nBackward pass that loads activations from disk with prefetching\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.Disco.forward(\n ctx,\n forward_function,\n hidden_states,\n *args,\n prefetch_size=1,\n prefetch_to_gpu=True,\n save_workers=4,\n)\nForward pass that 
offloads activations to disk asynchronously\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.Disco.get_instance(\n prefetch_size=1,\n prefetch_to_gpu=True,\n save_workers=4,\n)\nGet or create the offload manager\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager(\n self,\n prefetch_size=3,\n prefetch_to_gpu=True,\n save_workers=4,\n)\nManages offloaded tensors and handles prefetching in a separate thread.\nIncludes synchronization to prevent race conditions.\n\n\n\n\n\nName\nDescription\n\n\n\n\ncleanup\nClean up all temp files and stop prefetch thread with proper synchronization\n\n\ncleanup_tensor\nClean up a specific tensor file after it’s been used\n\n\nload_tensor\nLoad tensor from disk or prefetch cache with proper synchronization\n\n\nsave_tensor\nSave tensor to disk asynchronously and return file path with thread-safe operations\n\n\ntrigger_prefetch\nTrigger prefetching of the next N tensors with proper synchronization\n\n\nwait_for_save\nWait for a tensor to be saved to disk\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.cleanup()\nClean up all temp files and stop prefetch thread with proper synchronization\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.cleanup_tensor(\n file_path,\n)\nClean up a specific tensor file after it’s been used\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.load_tensor(\n file_path,\n target_device='cuda',\n)\nLoad tensor from disk or prefetch cache with proper synchronization\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.save_tensor(\n tensor,\n)\nSave tensor to disk asynchronously and return file path with thread-safe operations\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.trigger_prefetch(\n n=None,\n)\nTrigger prefetching of the next N tensors with proper 
synchronization\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.wait_for_save(\n file_path,\n timeout=None,\n)\nWait for a tensor to be saved to disk" + "text": "Name\nDescription\n\n\n\n\nDisco\nDisco: DIsk-based Storage and Checkpointing with Optimized prefetching\n\n\nDiskOffloadManager\nManages offloaded tensors and handles prefetching in a separate thread.\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.Disco()\nDisco: DIsk-based Storage and Checkpointing with Optimized prefetching\nAdvanced disk-based gradient checkpointer with prefetching.\n\n\n\n\n\nName\nDescription\n\n\n\n\nbackward\nBackward pass that loads activations from disk with prefetching\n\n\nforward\nForward pass that offloads activations to disk asynchronously\n\n\nget_instance\nGet or create the offload manager\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.Disco.backward(\n ctx,\n *grad_outputs,\n)\nBackward pass that loads activations from disk with prefetching\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.Disco.forward(\n ctx,\n forward_function,\n hidden_states,\n *args,\n prefetch_size=1,\n prefetch_to_gpu=True,\n save_workers=4,\n)\nForward pass that offloads activations to disk asynchronously\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.Disco.get_instance(\n prefetch_size=1,\n prefetch_to_gpu=True,\n save_workers=4,\n)\nGet or create the offload manager\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager(\n prefetch_size=3,\n prefetch_to_gpu=True,\n save_workers=4,\n)\nManages offloaded tensors and handles prefetching in a separate thread.\nIncludes synchronization to prevent race conditions.\n\n\n\n\n\nName\nDescription\n\n\n\n\ncleanup\nClean up all temp files and stop prefetch thread with proper synchronization\n\n\ncleanup_tensor\nClean up a specific tensor file after it’s been used\n\n\nload_tensor\nLoad tensor from disk or prefetch cache with proper synchronization\n\n\nsave_tensor\nSave 
tensor to disk asynchronously and return file path with thread-safe operations\n\n\ntrigger_prefetch\nTrigger prefetching of the next N tensors with proper synchronization\n\n\nwait_for_save\nWait for a tensor to be saved to disk\n\n\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.cleanup()\nClean up all temp files and stop prefetch thread with proper synchronization\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.cleanup_tensor(\n file_path,\n)\nClean up a specific tensor file after it’s been used\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.load_tensor(\n file_path,\n target_device='cuda',\n)\nLoad tensor from disk or prefetch cache with proper synchronization\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.save_tensor(\n tensor,\n)\nSave tensor to disk asynchronously and return file path with thread-safe operations\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.trigger_prefetch(\n n=None,\n)\nTrigger prefetching of the next N tensors with proper synchronization\n\n\n\nmonkeypatch.gradient_checkpointing.offload_disk.DiskOffloadManager.wait_for_save(\n file_path,\n timeout=None,\n)\nWait for a tensor to be saved to disk" }, { "objectID": "docs/api/utils.lora.html", @@ -2769,39 +2790,46 @@ "text": "Name\nDescription\n\n\n\n\npatch_training_loop_for_fsdp\nmonkeypatch for fixing the training loop for fsdp with optimizer save\n\n\n\n\n\nmonkeypatch.trainer_fsdp_optim.patch_training_loop_for_fsdp()\nmonkeypatch for fixing the training loop for fsdp with optimizer save" }, { - "objectID": "docs/api/prompt_strategies.stepwise_supervised.html", - "href": "docs/api/prompt_strategies.stepwise_supervised.html", - "title": "prompt_strategies.stepwise_supervised", + "objectID": "docs/api/core.builders.rl.html", + "href": "docs/api/core.builders.rl.html", + "title": "core.builders.rl", "section": "", - "text": 
"prompt_strategies.stepwise_supervised\nModule for stepwise datasets, typically including a prompt and reasoning traces,\nand (optionally) per-step, or per-prompt-trace labels for reward modelling.\n\n\n\n\n\nName\nDescription\n\n\n\n\nStepwiseSupervisedPromptTokenizingStrategy\nTokenizing strategy for supervised stepwise datasets, typically used for COT-reasoning.\n\n\n\n\n\nprompt_strategies.stepwise_supervised.StepwiseSupervisedPromptTokenizingStrategy(\n self,\n tokenizer,\n sequence_len=2048,\n step_separator='\\n',\n max_completion_length=None,\n train_on_last_step_only=False,\n)\nTokenizing strategy for supervised stepwise datasets, typically used for COT-reasoning.\nThese datasets should include the following columns:\n- prompt: the prompt text\n- completions: a list of n completion steps\n- labels: a list of n labels indicating the “correctness” of each step" + "text": "core.builders.rl\nBuilder for RLHF trainers\n\n\n\n\n\nName\nDescription\n\n\n\n\nHFPPOTrainerBuilder\nHF Factory class for PPO Trainer\n\n\nHFRLTrainerBuilder\nTrainer factory class for TRL-based RLHF trainers (e.g. DPO)\n\n\n\n\n\ncore.builders.rl.HFPPOTrainerBuilder(cfg, model, tokenizer, processor=None)\nHF Factory class for PPO Trainer\n\n\n\ncore.builders.rl.HFRLTrainerBuilder(cfg, model, tokenizer, processor=None)\nTrainer factory class for TRL-based RLHF trainers (e.g. 
DPO)" }, { - "objectID": "docs/api/prompt_strategies.stepwise_supervised.html#classes", - "href": "docs/api/prompt_strategies.stepwise_supervised.html#classes", - "title": "prompt_strategies.stepwise_supervised", + "objectID": "docs/api/core.builders.rl.html#classes", + "href": "docs/api/core.builders.rl.html#classes", + "title": "core.builders.rl", "section": "", - "text": "Name\nDescription\n\n\n\n\nStepwiseSupervisedPromptTokenizingStrategy\nTokenizing strategy for supervised stepwise datasets, typically used for COT-reasoning.\n\n\n\n\n\nprompt_strategies.stepwise_supervised.StepwiseSupervisedPromptTokenizingStrategy(\n self,\n tokenizer,\n sequence_len=2048,\n step_separator='\\n',\n max_completion_length=None,\n train_on_last_step_only=False,\n)\nTokenizing strategy for supervised stepwise datasets, typically used for COT-reasoning.\nThese datasets should include the following columns:\n- prompt: the prompt text\n- completions: a list of n completion steps\n- labels: a list of n labels indicating the “correctness” of each step" + "text": "Name\nDescription\n\n\n\n\nHFPPOTrainerBuilder\nHF Factory class for PPO Trainer\n\n\nHFRLTrainerBuilder\nTrainer factory class for TRL-based RLHF trainers (e.g. DPO)\n\n\n\n\n\ncore.builders.rl.HFPPOTrainerBuilder(cfg, model, tokenizer, processor=None)\nHF Factory class for PPO Trainer\n\n\n\ncore.builders.rl.HFRLTrainerBuilder(cfg, model, tokenizer, processor=None)\nTrainer factory class for TRL-based RLHF trainers (e.g. 
DPO)" }, { - "objectID": "docs/api/common.datasets.html", - "href": "docs/api/common.datasets.html", - "title": "common.datasets", + "objectID": "docs/api/loaders.tokenizer.html", + "href": "docs/api/loaders.tokenizer.html", + "title": "loaders.tokenizer", "section": "", - "text": "common.datasets\nDataset loading utilities.\n\n\n\n\n\nName\nDescription\n\n\n\n\nTrainDatasetMeta\nDataclass with fields for training and validation datasets and metadata.\n\n\n\n\n\ncommon.datasets.TrainDatasetMeta(\n self,\n train_dataset,\n eval_dataset=None,\n total_num_steps=None,\n)\nDataclass with fields for training and validation datasets and metadata.\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nload_datasets\nLoads one or more training or evaluation datasets, calling\n\n\nload_preference_datasets\nLoads one or more training or evaluation datasets for RL training using paired\n\n\nsample_dataset\nRandomly sample num_samples samples from dataset.\n\n\n\n\n\ncommon.datasets.load_datasets(cfg, cli_args=None, debug=False)\nLoads one or more training or evaluation datasets, calling\naxolotl.utils.data.prepare_dataset. 
Optionally, logs out debug information.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ncli_args\nPreprocessCliArgs | TrainerCliArgs | None\nCommand-specific CLI arguments.\nNone\n\n\ndebug\nbool\nWhether to print out tokenization of sample\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTrainDatasetMeta\nDataclass with fields for training and evaluation datasets and the computed\n\n\n\nTrainDatasetMeta\ntotal_num_steps.\n\n\n\n\n\n\n\ncommon.datasets.load_preference_datasets(cfg, cli_args)\nLoads one or more training or evaluation datasets for RL training using paired\npreference data, calling axolotl.utils.data.rl.load_prepare_preference_datasets.\nOptionally, logs out debug information.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ncli_args\nUnion[PreprocessCliArgs, TrainerCliArgs]\nCommand-specific CLI arguments.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTrainDatasetMeta\nDataclass with fields for training and evaluation datasets and the computed\n\n\n\nTrainDatasetMeta\ntotal_num_steps.\n\n\n\n\n\n\n\ncommon.datasets.sample_dataset(dataset, num_samples)\nRandomly sample num_samples samples from dataset.\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ndataset\nDataset\nDataset.\nrequired\n\n\nnum_samples\nint\nNumber of samples to return.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nDataset\nRandom sample (with replacement) of examples in dataset." 
+ "text": "loaders.tokenizer\nTokenizer loading functionality and associated utils\n\n\n\n\n\nName\nDescription\n\n\n\n\nload_tokenizer\nLoad and configure the tokenizer based on the provided config.\n\n\nmodify_tokenizer_files\nModify tokenizer files to replace added_tokens strings, save to output directory,\n\n\n\n\n\nloaders.tokenizer.load_tokenizer(cfg)\nLoad and configure the tokenizer based on the provided config.\n\n\n\nloaders.tokenizer.modify_tokenizer_files(\n tokenizer_path,\n token_mappings,\n output_dir,\n)\nModify tokenizer files to replace added_tokens strings, save to output directory,\nand return the path to the modified tokenizer.\nThis only works with reserved tokens that were added to the tokenizer, not tokens\nalready part of the vocab.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntokenizer_path\nstr\nPath or name of the original tokenizer\nrequired\n\n\ntoken_mappings\ndict[int, str]\nDict mapping {token_id (int): new_token_string}\nrequired\n\n\noutput_dir\nstr\nDirectory to save the modified tokenizer\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nstr\nPath to the modified tokenizer directory\n\n\n\nRef: https://github.com/huggingface/transformers/issues/27974#issuecomment-1854188941" }, { - "objectID": "docs/api/common.datasets.html#classes", - "href": "docs/api/common.datasets.html#classes", - "title": "common.datasets", + "objectID": "docs/api/loaders.tokenizer.html#functions", + "href": "docs/api/loaders.tokenizer.html#functions", + "title": "loaders.tokenizer", "section": "", - "text": "Name\nDescription\n\n\n\n\nTrainDatasetMeta\nDataclass with fields for training and validation datasets and metadata.\n\n\n\n\n\ncommon.datasets.TrainDatasetMeta(\n self,\n train_dataset,\n eval_dataset=None,\n total_num_steps=None,\n)\nDataclass with fields for training and validation datasets and metadata." 
+ "text": "Name\nDescription\n\n\n\n\nload_tokenizer\nLoad and configure the tokenizer based on the provided config.\n\n\nmodify_tokenizer_files\nModify tokenizer files to replace added_tokens strings, save to output directory,\n\n\n\n\n\nloaders.tokenizer.load_tokenizer(cfg)\nLoad and configure the tokenizer based on the provided config.\n\n\n\nloaders.tokenizer.modify_tokenizer_files(\n tokenizer_path,\n token_mappings,\n output_dir,\n)\nModify tokenizer files to replace added_tokens strings, save to output directory,\nand return the path to the modified tokenizer.\nThis only works with reserved tokens that were added to the tokenizer, not tokens\nalready part of the vocab.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ntokenizer_path\nstr\nPath or name of the original tokenizer\nrequired\n\n\ntoken_mappings\ndict[int, str]\nDict mapping {token_id (int): new_token_string}\nrequired\n\n\noutput_dir\nstr\nDirectory to save the modified tokenizer\nrequired\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nstr\nPath to the modified tokenizer directory\n\n\n\nRef: https://github.com/huggingface/transformers/issues/27974#issuecomment-1854188941" }, { - "objectID": "docs/api/common.datasets.html#functions", - "href": "docs/api/common.datasets.html#functions", - "title": "common.datasets", + "objectID": "docs/api/utils.callbacks.profiler.html", + "href": "docs/api/utils.callbacks.profiler.html", + "title": "utils.callbacks.profiler", "section": "", - "text": "Name\nDescription\n\n\n\n\nload_datasets\nLoads one or more training or evaluation datasets, calling\n\n\nload_preference_datasets\nLoads one or more training or evaluation datasets for RL training using paired\n\n\nsample_dataset\nRandomly sample num_samples samples from dataset.\n\n\n\n\n\ncommon.datasets.load_datasets(cfg, cli_args=None, debug=False)\nLoads one or more training or evaluation datasets, calling\naxolotl.utils.data.prepare_dataset. 
Optionally, logs out debug information.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ncli_args\nPreprocessCliArgs | TrainerCliArgs | None\nCommand-specific CLI arguments.\nNone\n\n\ndebug\nbool\nWhether to print out tokenization of sample\nFalse\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTrainDatasetMeta\nDataclass with fields for training and evaluation datasets and the computed\n\n\n\nTrainDatasetMeta\ntotal_num_steps.\n\n\n\n\n\n\n\ncommon.datasets.load_preference_datasets(cfg, cli_args)\nLoads one or more training or evaluation datasets for RL training using paired\npreference data, calling axolotl.utils.data.rl.load_prepare_preference_datasets.\nOptionally, logs out debug information.\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ncfg\nDictDefault\nDictionary mapping axolotl config keys to values.\nrequired\n\n\ncli_args\nUnion[PreprocessCliArgs, TrainerCliArgs]\nCommand-specific CLI arguments.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nTrainDatasetMeta\nDataclass with fields for training and evaluation datasets and the computed\n\n\n\nTrainDatasetMeta\ntotal_num_steps.\n\n\n\n\n\n\n\ncommon.datasets.sample_dataset(dataset, num_samples)\nRandomly sample num_samples samples from dataset.\n\n\n\n\n\nName\nType\nDescription\nDefault\n\n\n\n\ndataset\nDataset\nDataset.\nrequired\n\n\nnum_samples\nint\nNumber of samples to return.\nrequired\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nName\nType\nDescription\n\n\n\n\n\nDataset\nRandom sample (with replacement) of examples in dataset." 
+ "text": "utils.callbacks.profiler\nHF Trainer callback for creating pytorch profiling snapshots\n\n\n\n\n\nName\nDescription\n\n\n\n\nPytorchProfilerCallback\nPyTorch Profiler callback to create snapshots of GPU memory usage at specified steps.\n\n\n\n\n\nutils.callbacks.profiler.PytorchProfilerCallback(steps_to_profile=5)\nPyTorch Profiler callback to create snapshots of GPU memory usage at specified steps." + }, + { + "objectID": "docs/api/utils.callbacks.profiler.html#classes", + "href": "docs/api/utils.callbacks.profiler.html#classes", + "title": "utils.callbacks.profiler", + "section": "", + "text": "Name\nDescription\n\n\n\n\nPytorchProfilerCallback\nPyTorch Profiler callback to create snapshots of GPU memory usage at specified steps.\n\n\n\n\n\nutils.callbacks.profiler.PytorchProfilerCallback(steps_to_profile=5)\nPyTorch Profiler callback to create snapshots of GPU memory usage at specified steps." }, { "objectID": "docs/api/prompt_strategies.alpaca_instruct.html", @@ -2822,14 +2850,14 @@ "href": "docs/api/monkeypatch.attention.mllama.html", "title": "monkeypatch.attention.mllama", "section": "", - "text": "monkeypatch.attention.mllama\nMonkeypatch for Vision Llama for FA2 support\n\n\n\n\n\nName\nDescription\n\n\n\n\nMllamaTextCrossFlashAttention2\nMllama flash cross-attention module. This module inherits from MllamaTextCrossAttention and\n\n\nMllamaTextSelfFlashAttention2\nMllama flash self-attention module. This module inherits from MllamaTextSelfAttention and\n\n\n\n\n\nmonkeypatch.attention.mllama.MllamaTextCrossFlashAttention2(\n self,\n *args,\n **kwargs,\n)\nMllama flash cross-attention module. This module inherits from MllamaTextCrossAttention and\nimplements the forward pass using Flash Attention for improved performance.\n\n\n\nmonkeypatch.attention.mllama.MllamaTextSelfFlashAttention2(\n self,\n config,\n layer_idx,\n *args,\n **kwargs,\n)\nMllama flash self-attention module. 
This module inherits from MllamaTextSelfAttention and\nimplements the forward pass using Flash Attention for improved performance." + "text": "monkeypatch.attention.mllama\nMonkeypatch for Vision Llama for FA2 support\n\n\n\n\n\nName\nDescription\n\n\n\n\nMllamaTextCrossFlashAttention2\nMllama flash cross-attention module. This module inherits from MllamaTextCrossAttention and\n\n\nMllamaTextSelfFlashAttention2\nMllama flash self-attention module. This module inherits from MllamaTextSelfAttention and\n\n\n\n\n\nmonkeypatch.attention.mllama.MllamaTextCrossFlashAttention2(*args, **kwargs)\nMllama flash cross-attention module. This module inherits from MllamaTextCrossAttention and\nimplements the forward pass using Flash Attention for improved performance.\n\n\n\nmonkeypatch.attention.mllama.MllamaTextSelfFlashAttention2(\n config,\n layer_idx,\n *args,\n **kwargs,\n)\nMllama flash self-attention module. This module inherits from MllamaTextSelfAttention and\nimplements the forward pass using Flash Attention for improved performance." }, { "objectID": "docs/api/monkeypatch.attention.mllama.html#classes", "href": "docs/api/monkeypatch.attention.mllama.html#classes", "title": "monkeypatch.attention.mllama", "section": "", - "text": "Name\nDescription\n\n\n\n\nMllamaTextCrossFlashAttention2\nMllama flash cross-attention module. This module inherits from MllamaTextCrossAttention and\n\n\nMllamaTextSelfFlashAttention2\nMllama flash self-attention module. This module inherits from MllamaTextSelfAttention and\n\n\n\n\n\nmonkeypatch.attention.mllama.MllamaTextCrossFlashAttention2(\n self,\n *args,\n **kwargs,\n)\nMllama flash cross-attention module. This module inherits from MllamaTextCrossAttention and\nimplements the forward pass using Flash Attention for improved performance.\n\n\n\nmonkeypatch.attention.mllama.MllamaTextSelfFlashAttention2(\n self,\n config,\n layer_idx,\n *args,\n **kwargs,\n)\nMllama flash self-attention module. 
This module inherits from MllamaTextSelfAttention and\nimplements the forward pass using Flash Attention for improved performance." + "text": "Name\nDescription\n\n\n\n\nMllamaTextCrossFlashAttention2\nMllama flash cross-attention module. This module inherits from MllamaTextCrossAttention and\n\n\nMllamaTextSelfFlashAttention2\nMllama flash self-attention module. This module inherits from MllamaTextSelfAttention and\n\n\n\n\n\nmonkeypatch.attention.mllama.MllamaTextCrossFlashAttention2(*args, **kwargs)\nMllama flash cross-attention module. This module inherits from MllamaTextCrossAttention and\nimplements the forward pass using Flash Attention for improved performance.\n\n\n\nmonkeypatch.attention.mllama.MllamaTextSelfFlashAttention2(\n config,\n layer_idx,\n *args,\n **kwargs,\n)\nMllama flash self-attention module. This module inherits from MllamaTextSelfAttention and\nimplements the forward pass using Flash Attention for improved performance." }, { "objectID": "docs/api/prompt_strategies.dpo.zephyr.html", @@ -2871,14 +2899,14 @@ "href": "docs/api/prompt_strategies.orcamini.html", "title": "prompt_strategies.orcamini", "section": "", - "text": "prompt_strategies.orcamini\nPrompt Strategy for finetuning Orca Mini (v2) models\nsee also https://huggingface.co/psmathur/orca_mini_v2_7b for more information\nUse dataset type: orcamini in conig.yml to use this prompt style.\nCompared to the alpaca_w_system.open_orca dataset type,\nthis one specifies the system prompt with “### System:”.\nNot suited/tested for multiple-turn conversations without further adjustments.\n\n\n\n\n\nName\nDescription\n\n\n\n\nOrcaMiniPrompter\nAdjusted Prompter for Orca Mini (v2) datasets\n\n\n\n\n\nprompt_strategies.orcamini.OrcaMiniPrompter(\n self,\n prompt_style=PromptStyle.INSTRUCT.value,\n)\nAdjusted Prompter for Orca Mini (v2) datasets" + "text": "prompt_strategies.orcamini\nPrompt Strategy for finetuning Orca Mini (v2) models\nsee also https://huggingface.co/psmathur/orca_mini_v2_7b 
for more information\nUse dataset type: orcamini in conig.yml to use this prompt style.\nCompared to the alpaca_w_system.open_orca dataset type,\nthis one specifies the system prompt with “### System:”.\nNot suited/tested for multiple-turn conversations without further adjustments.\n\n\n\n\n\nName\nDescription\n\n\n\n\nOrcaMiniPrompter\nAdjusted Prompter for Orca Mini (v2) datasets\n\n\n\n\n\nprompt_strategies.orcamini.OrcaMiniPrompter(\n prompt_style=PromptStyle.INSTRUCT.value,\n)\nAdjusted Prompter for Orca Mini (v2) datasets" }, { "objectID": "docs/api/prompt_strategies.orcamini.html#classes", "href": "docs/api/prompt_strategies.orcamini.html#classes", "title": "prompt_strategies.orcamini", "section": "", - "text": "Name\nDescription\n\n\n\n\nOrcaMiniPrompter\nAdjusted Prompter for Orca Mini (v2) datasets\n\n\n\n\n\nprompt_strategies.orcamini.OrcaMiniPrompter(\n self,\n prompt_style=PromptStyle.INSTRUCT.value,\n)\nAdjusted Prompter for Orca Mini (v2) datasets" + "text": "Name\nDescription\n\n\n\n\nOrcaMiniPrompter\nAdjusted Prompter for Orca Mini (v2) datasets\n\n\n\n\n\nprompt_strategies.orcamini.OrcaMiniPrompter(\n prompt_style=PromptStyle.INSTRUCT.value,\n)\nAdjusted Prompter for Orca Mini (v2) datasets" }, { "objectID": "docs/api/cli.evaluate.html", @@ -2997,14 +3025,14 @@ "href": "docs/api/prompt_strategies.alpaca_chat.html", "title": "prompt_strategies.alpaca_chat", "section": "", - "text": "prompt_strategies.alpaca_chat\nModule for Alpaca prompt strategy classes\n\n\n\n\n\nName\nDescription\n\n\n\n\nAlpacaChatPrompter\nAlpaca Chat Prompter extending the system prompt to for chat-instruct answers\n\n\nAlpacaConcisePrompter\nAlpaca Prompter extending the system prompt to ask for concise chat-instruct answers\n\n\nAlpacaQAPromptTokenizingStrategy\nTokenizing strategy for AlpacaQA\n\n\nCamelAIPromptTokenizingStrategy\nTokenizing strategy for CamelAI datasets\n\n\nNoSystemPrompter\nNull Prompter with no system 
prompts\n\n\n\n\n\nprompt_strategies.alpaca_chat.AlpacaChatPrompter(self)\nAlpaca Chat Prompter extending the system prompt to for chat-instruct answers\n\n\n\nprompt_strategies.alpaca_chat.AlpacaConcisePrompter(\n self,\n prompt_style=PromptStyle.INSTRUCT.value,\n)\nAlpaca Prompter extending the system prompt to ask for concise chat-instruct answers\n\n\n\nprompt_strategies.alpaca_chat.AlpacaQAPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for AlpacaQA\n\n\n\nprompt_strategies.alpaca_chat.CamelAIPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for CamelAI datasets\n\n\n\nprompt_strategies.alpaca_chat.NoSystemPrompter(self)\nNull Prompter with no system prompts" + "text": "prompt_strategies.alpaca_chat\nModule for Alpaca prompt strategy classes\n\n\n\n\n\nName\nDescription\n\n\n\n\nAlpacaChatPrompter\nAlpaca Chat Prompter extending the system prompt to for chat-instruct answers\n\n\nAlpacaConcisePrompter\nAlpaca Prompter extending the system prompt to ask for concise chat-instruct answers\n\n\nAlpacaQAPromptTokenizingStrategy\nTokenizing strategy for AlpacaQA\n\n\nCamelAIPromptTokenizingStrategy\nTokenizing strategy for CamelAI datasets\n\n\nNoSystemPrompter\nNull Prompter with no system prompts\n\n\n\n\n\nprompt_strategies.alpaca_chat.AlpacaChatPrompter()\nAlpaca Chat Prompter extending the system prompt to for chat-instruct answers\n\n\n\nprompt_strategies.alpaca_chat.AlpacaConcisePrompter(\n prompt_style=PromptStyle.INSTRUCT.value,\n)\nAlpaca Prompter extending the system prompt to ask for concise chat-instruct answers\n\n\n\nprompt_strategies.alpaca_chat.AlpacaQAPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for AlpacaQA\n\n\n\nprompt_strategies.alpaca_chat.CamelAIPromptTokenizingStrategy(\n prompter,\n tokenizer,\n 
train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for CamelAI datasets\n\n\n\nprompt_strategies.alpaca_chat.NoSystemPrompter()\nNull Prompter with no system prompts" }, { "objectID": "docs/api/prompt_strategies.alpaca_chat.html#classes", "href": "docs/api/prompt_strategies.alpaca_chat.html#classes", "title": "prompt_strategies.alpaca_chat", "section": "", - "text": "Name\nDescription\n\n\n\n\nAlpacaChatPrompter\nAlpaca Chat Prompter extending the system prompt to for chat-instruct answers\n\n\nAlpacaConcisePrompter\nAlpaca Prompter extending the system prompt to ask for concise chat-instruct answers\n\n\nAlpacaQAPromptTokenizingStrategy\nTokenizing strategy for AlpacaQA\n\n\nCamelAIPromptTokenizingStrategy\nTokenizing strategy for CamelAI datasets\n\n\nNoSystemPrompter\nNull Prompter with no system prompts\n\n\n\n\n\nprompt_strategies.alpaca_chat.AlpacaChatPrompter(self)\nAlpaca Chat Prompter extending the system prompt to for chat-instruct answers\n\n\n\nprompt_strategies.alpaca_chat.AlpacaConcisePrompter(\n self,\n prompt_style=PromptStyle.INSTRUCT.value,\n)\nAlpaca Prompter extending the system prompt to ask for concise chat-instruct answers\n\n\n\nprompt_strategies.alpaca_chat.AlpacaQAPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for AlpacaQA\n\n\n\nprompt_strategies.alpaca_chat.CamelAIPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for CamelAI datasets\n\n\n\nprompt_strategies.alpaca_chat.NoSystemPrompter(self)\nNull Prompter with no system prompts" + "text": "Name\nDescription\n\n\n\n\nAlpacaChatPrompter\nAlpaca Chat Prompter extending the system prompt to for chat-instruct answers\n\n\nAlpacaConcisePrompter\nAlpaca Prompter extending the system prompt to ask for concise chat-instruct answers\n\n\nAlpacaQAPromptTokenizingStrategy\nTokenizing strategy for 
AlpacaQA\n\n\nCamelAIPromptTokenizingStrategy\nTokenizing strategy for CamelAI datasets\n\n\nNoSystemPrompter\nNull Prompter with no system prompts\n\n\n\n\n\nprompt_strategies.alpaca_chat.AlpacaChatPrompter()\nAlpaca Chat Prompter extending the system prompt to for chat-instruct answers\n\n\n\nprompt_strategies.alpaca_chat.AlpacaConcisePrompter(\n prompt_style=PromptStyle.INSTRUCT.value,\n)\nAlpaca Prompter extending the system prompt to ask for concise chat-instruct answers\n\n\n\nprompt_strategies.alpaca_chat.AlpacaQAPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for AlpacaQA\n\n\n\nprompt_strategies.alpaca_chat.CamelAIPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for CamelAI datasets\n\n\n\nprompt_strategies.alpaca_chat.NoSystemPrompter()\nNull Prompter with no system prompts" }, { "objectID": "docs/api/prompt_strategies.kto.llama3.html", @@ -3025,14 +3053,14 @@ "href": "docs/api/integrations.kd.trainer.html", "title": "integrations.kd.trainer", "section": "", - "text": "integrations.kd.trainer\nKD trainer\n\n\n\n\n\nName\nDescription\n\n\n\n\nAxolotlKDTrainer\nCustom trainer subclass for Knowledge Distillation (KD)\n\n\n\n\n\nintegrations.kd.trainer.AxolotlKDTrainer(\n self,\n *_args,\n bench_data_collator=None,\n eval_data_collator=None,\n dataset_tags=None,\n **kwargs,\n)\nCustom trainer subclass for Knowledge Distillation (KD)\n\n\n\n\n\nName\nDescription\n\n\n\n\ncompute_loss\nHow the loss is computed by Trainer. By default, all models return the loss in the first element.\n\n\n\n\n\nintegrations.kd.trainer.AxolotlKDTrainer.compute_loss(\n model,\n inputs,\n return_outputs=False,\n num_items_in_batch=None,\n)\nHow the loss is computed by Trainer. By default, all models return the loss in the first element.\nSubclass and override for custom behavior." 
+ "text": "integrations.kd.trainer\nKD trainer\n\n\n\n\n\nName\nDescription\n\n\n\n\nAxolotlKDTrainer\nCustom trainer subclass for Knowledge Distillation (KD)\n\n\n\n\n\nintegrations.kd.trainer.AxolotlKDTrainer(\n *_args,\n bench_data_collator=None,\n eval_data_collator=None,\n dataset_tags=None,\n **kwargs,\n)\nCustom trainer subclass for Knowledge Distillation (KD)\n\n\n\n\n\nName\nDescription\n\n\n\n\ncompute_loss\nHow the loss is computed by Trainer. By default, all models return the loss in the first element.\n\n\n\n\n\nintegrations.kd.trainer.AxolotlKDTrainer.compute_loss(\n model,\n inputs,\n return_outputs=False,\n num_items_in_batch=None,\n)\nHow the loss is computed by Trainer. By default, all models return the loss in the first element.\nSubclass and override for custom behavior." }, { "objectID": "docs/api/integrations.kd.trainer.html#classes", "href": "docs/api/integrations.kd.trainer.html#classes", "title": "integrations.kd.trainer", "section": "", - "text": "Name\nDescription\n\n\n\n\nAxolotlKDTrainer\nCustom trainer subclass for Knowledge Distillation (KD)\n\n\n\n\n\nintegrations.kd.trainer.AxolotlKDTrainer(\n self,\n *_args,\n bench_data_collator=None,\n eval_data_collator=None,\n dataset_tags=None,\n **kwargs,\n)\nCustom trainer subclass for Knowledge Distillation (KD)\n\n\n\n\n\nName\nDescription\n\n\n\n\ncompute_loss\nHow the loss is computed by Trainer. By default, all models return the loss in the first element.\n\n\n\n\n\nintegrations.kd.trainer.AxolotlKDTrainer.compute_loss(\n model,\n inputs,\n return_outputs=False,\n num_items_in_batch=None,\n)\nHow the loss is computed by Trainer. By default, all models return the loss in the first element.\nSubclass and override for custom behavior." 
+ "text": "Name\nDescription\n\n\n\n\nAxolotlKDTrainer\nCustom trainer subclass for Knowledge Distillation (KD)\n\n\n\n\n\nintegrations.kd.trainer.AxolotlKDTrainer(\n *_args,\n bench_data_collator=None,\n eval_data_collator=None,\n dataset_tags=None,\n **kwargs,\n)\nCustom trainer subclass for Knowledge Distillation (KD)\n\n\n\n\n\nName\nDescription\n\n\n\n\ncompute_loss\nHow the loss is computed by Trainer. By default, all models return the loss in the first element.\n\n\n\n\n\nintegrations.kd.trainer.AxolotlKDTrainer.compute_loss(\n model,\n inputs,\n return_outputs=False,\n num_items_in_batch=None,\n)\nHow the loss is computed by Trainer. By default, all models return the loss in the first element.\nSubclass and override for custom behavior." }, { "objectID": "docs/api/utils.quantization.html", @@ -3053,14 +3081,14 @@ "href": "docs/api/prompt_tokenizers.html", "title": "prompt_tokenizers", "section": "", - "text": "prompt_tokenizers\nModule containing PromptTokenizingStrategy and Prompter classes\n\n\n\n\n\nName\nDescription\n\n\n\n\nAlpacaMultipleChoicePromptTokenizingStrategy\nTokenizing strategy for Alpaca Multiple Choice prompts.\n\n\nAlpacaPromptTokenizingStrategy\nTokenizing strategy for Alpaca prompts.\n\n\nAlpacaReflectionPTStrategy\nTokenizing strategy for Alpaca Reflection prompts.\n\n\nDatasetWrappingStrategy\nAbstract class for wrapping datasets for Chat Messages\n\n\nGPTeacherPromptTokenizingStrategy\nTokenizing strategy for GPTeacher prompts.\n\n\nInstructionPromptTokenizingStrategy\nTokenizing strategy for instruction-based prompts.\n\n\nInvalidDataException\nException raised when the data is invalid\n\n\nJeopardyPromptTokenizingStrategy\nTokenizing strategy for Jeopardy prompts.\n\n\nNomicGPT4AllPromptTokenizingStrategy\nTokenizing strategy for NomicGPT4All prompts.\n\n\nOpenAssistantPromptTokenizingStrategy\nTokenizing strategy for OpenAssistant prompts.\n\n\nPromptTokenizingStrategy\nAbstract class for tokenizing 
strategies\n\n\nReflectionPromptTokenizingStrategy\nTokenizing strategy for Reflection prompts.\n\n\nSummarizeTLDRPromptTokenizingStrategy\nTokenizing strategy for SummarizeTLDR prompts.\n\n\n\n\n\nprompt_tokenizers.AlpacaMultipleChoicePromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Alpaca Multiple Choice prompts.\n\n\n\nprompt_tokenizers.AlpacaPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Alpaca prompts.\n\n\n\nprompt_tokenizers.AlpacaReflectionPTStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Alpaca Reflection prompts.\n\n\n\nprompt_tokenizers.DatasetWrappingStrategy()\nAbstract class for wrapping datasets for Chat Messages\n\n\n\nprompt_tokenizers.GPTeacherPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for GPTeacher prompts.\n\n\n\nprompt_tokenizers.InstructionPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for instruction-based prompts.\n\n\n\nprompt_tokenizers.InvalidDataException()\nException raised when the data is invalid\n\n\n\nprompt_tokenizers.JeopardyPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Jeopardy prompts.\n\n\n\nprompt_tokenizers.NomicGPT4AllPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for NomicGPT4All prompts.\n\n\n\nprompt_tokenizers.OpenAssistantPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for OpenAssistant prompts.\n\n\n\nprompt_tokenizers.PromptTokenizingStrategy(\n self,\n prompter,\n 
tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nAbstract class for tokenizing strategies\n\n\n\nprompt_tokenizers.ReflectionPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Reflection prompts.\n\n\n\nprompt_tokenizers.SummarizeTLDRPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for SummarizeTLDR prompts.\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nparse_tokenized_to_result\nParses the tokenized prompt and append the tokenized input_ids, attention_mask and labels to the result\n\n\ntokenize_prompt_default\nReturns the default values for the tokenize prompt function\n\n\n\n\n\nprompt_tokenizers.parse_tokenized_to_result(\n result,\n current_len,\n res,\n labels,\n pad_token_id=None,\n)\nParses the tokenized prompt and append the tokenized input_ids, attention_mask and labels to the result\n\n\n\nprompt_tokenizers.tokenize_prompt_default()\nReturns the default values for the tokenize prompt function" + "text": "prompt_tokenizers\nModule containing PromptTokenizingStrategy and Prompter classes\n\n\n\n\n\nName\nDescription\n\n\n\n\nAlpacaMultipleChoicePromptTokenizingStrategy\nTokenizing strategy for Alpaca Multiple Choice prompts.\n\n\nAlpacaPromptTokenizingStrategy\nTokenizing strategy for Alpaca prompts.\n\n\nAlpacaReflectionPTStrategy\nTokenizing strategy for Alpaca Reflection prompts.\n\n\nDatasetWrappingStrategy\nAbstract class for wrapping datasets for Chat Messages\n\n\nGPTeacherPromptTokenizingStrategy\nTokenizing strategy for GPTeacher prompts.\n\n\nInstructionPromptTokenizingStrategy\nTokenizing strategy for instruction-based prompts.\n\n\nInvalidDataException\nException raised when the data is invalid\n\n\nJeopardyPromptTokenizingStrategy\nTokenizing strategy for Jeopardy prompts.\n\n\nNomicGPT4AllPromptTokenizingStrategy\nTokenizing strategy for NomicGPT4All 
prompts.\n\n\nOpenAssistantPromptTokenizingStrategy\nTokenizing strategy for OpenAssistant prompts.\n\n\nPromptTokenizingStrategy\nAbstract class for tokenizing strategies\n\n\nReflectionPromptTokenizingStrategy\nTokenizing strategy for Reflection prompts.\n\n\nSummarizeTLDRPromptTokenizingStrategy\nTokenizing strategy for SummarizeTLDR prompts.\n\n\n\n\n\nprompt_tokenizers.AlpacaMultipleChoicePromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Alpaca Multiple Choice prompts.\n\n\n\nprompt_tokenizers.AlpacaPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Alpaca prompts.\n\n\n\nprompt_tokenizers.AlpacaReflectionPTStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Alpaca Reflection prompts.\n\n\n\nprompt_tokenizers.DatasetWrappingStrategy()\nAbstract class for wrapping datasets for Chat Messages\n\n\n\nprompt_tokenizers.GPTeacherPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for GPTeacher prompts.\n\n\n\nprompt_tokenizers.InstructionPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for instruction-based prompts.\n\n\n\nprompt_tokenizers.InvalidDataException()\nException raised when the data is invalid\n\n\n\nprompt_tokenizers.JeopardyPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Jeopardy prompts.\n\n\n\nprompt_tokenizers.NomicGPT4AllPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for NomicGPT4All prompts.\n\n\n\nprompt_tokenizers.OpenAssistantPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for 
OpenAssistant prompts.\n\n\n\nprompt_tokenizers.PromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nAbstract class for tokenizing strategies\n\n\n\nprompt_tokenizers.ReflectionPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Reflection prompts.\n\n\n\nprompt_tokenizers.SummarizeTLDRPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for SummarizeTLDR prompts.\n\n\n\n\n\n\n\nName\nDescription\n\n\n\n\nparse_tokenized_to_result\nParses the tokenized prompt and append the tokenized input_ids, attention_mask and labels to the result\n\n\ntokenize_prompt_default\nReturns the default values for the tokenize prompt function\n\n\n\n\n\nprompt_tokenizers.parse_tokenized_to_result(\n result,\n current_len,\n res,\n labels,\n pad_token_id=None,\n)\nParses the tokenized prompt and append the tokenized input_ids, attention_mask and labels to the result\n\n\n\nprompt_tokenizers.tokenize_prompt_default()\nReturns the default values for the tokenize prompt function" }, { "objectID": "docs/api/prompt_tokenizers.html#classes", "href": "docs/api/prompt_tokenizers.html#classes", "title": "prompt_tokenizers", "section": "", - "text": "Name\nDescription\n\n\n\n\nAlpacaMultipleChoicePromptTokenizingStrategy\nTokenizing strategy for Alpaca Multiple Choice prompts.\n\n\nAlpacaPromptTokenizingStrategy\nTokenizing strategy for Alpaca prompts.\n\n\nAlpacaReflectionPTStrategy\nTokenizing strategy for Alpaca Reflection prompts.\n\n\nDatasetWrappingStrategy\nAbstract class for wrapping datasets for Chat Messages\n\n\nGPTeacherPromptTokenizingStrategy\nTokenizing strategy for GPTeacher prompts.\n\n\nInstructionPromptTokenizingStrategy\nTokenizing strategy for instruction-based prompts.\n\n\nInvalidDataException\nException raised when the data is invalid\n\n\nJeopardyPromptTokenizingStrategy\nTokenizing 
strategy for Jeopardy prompts.\n\n\nNomicGPT4AllPromptTokenizingStrategy\nTokenizing strategy for NomicGPT4All prompts.\n\n\nOpenAssistantPromptTokenizingStrategy\nTokenizing strategy for OpenAssistant prompts.\n\n\nPromptTokenizingStrategy\nAbstract class for tokenizing strategies\n\n\nReflectionPromptTokenizingStrategy\nTokenizing strategy for Reflection prompts.\n\n\nSummarizeTLDRPromptTokenizingStrategy\nTokenizing strategy for SummarizeTLDR prompts.\n\n\n\n\n\nprompt_tokenizers.AlpacaMultipleChoicePromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Alpaca Multiple Choice prompts.\n\n\n\nprompt_tokenizers.AlpacaPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Alpaca prompts.\n\n\n\nprompt_tokenizers.AlpacaReflectionPTStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Alpaca Reflection prompts.\n\n\n\nprompt_tokenizers.DatasetWrappingStrategy()\nAbstract class for wrapping datasets for Chat Messages\n\n\n\nprompt_tokenizers.GPTeacherPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for GPTeacher prompts.\n\n\n\nprompt_tokenizers.InstructionPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for instruction-based prompts.\n\n\n\nprompt_tokenizers.InvalidDataException()\nException raised when the data is invalid\n\n\n\nprompt_tokenizers.JeopardyPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Jeopardy prompts.\n\n\n\nprompt_tokenizers.NomicGPT4AllPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for NomicGPT4All 
prompts.\n\n\n\nprompt_tokenizers.OpenAssistantPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for OpenAssistant prompts.\n\n\n\nprompt_tokenizers.PromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nAbstract class for tokenizing strategies\n\n\n\nprompt_tokenizers.ReflectionPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Reflection prompts.\n\n\n\nprompt_tokenizers.SummarizeTLDRPromptTokenizingStrategy(\n self,\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for SummarizeTLDR prompts." + "text": "Name\nDescription\n\n\n\n\nAlpacaMultipleChoicePromptTokenizingStrategy\nTokenizing strategy for Alpaca Multiple Choice prompts.\n\n\nAlpacaPromptTokenizingStrategy\nTokenizing strategy for Alpaca prompts.\n\n\nAlpacaReflectionPTStrategy\nTokenizing strategy for Alpaca Reflection prompts.\n\n\nDatasetWrappingStrategy\nAbstract class for wrapping datasets for Chat Messages\n\n\nGPTeacherPromptTokenizingStrategy\nTokenizing strategy for GPTeacher prompts.\n\n\nInstructionPromptTokenizingStrategy\nTokenizing strategy for instruction-based prompts.\n\n\nInvalidDataException\nException raised when the data is invalid\n\n\nJeopardyPromptTokenizingStrategy\nTokenizing strategy for Jeopardy prompts.\n\n\nNomicGPT4AllPromptTokenizingStrategy\nTokenizing strategy for NomicGPT4All prompts.\n\n\nOpenAssistantPromptTokenizingStrategy\nTokenizing strategy for OpenAssistant prompts.\n\n\nPromptTokenizingStrategy\nAbstract class for tokenizing strategies\n\n\nReflectionPromptTokenizingStrategy\nTokenizing strategy for Reflection prompts.\n\n\nSummarizeTLDRPromptTokenizingStrategy\nTokenizing strategy for SummarizeTLDR prompts.\n\n\n\n\n\nprompt_tokenizers.AlpacaMultipleChoicePromptTokenizingStrategy(\n prompter,\n 
tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Alpaca Multiple Choice prompts.\n\n\n\nprompt_tokenizers.AlpacaPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Alpaca prompts.\n\n\n\nprompt_tokenizers.AlpacaReflectionPTStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Alpaca Reflection prompts.\n\n\n\nprompt_tokenizers.DatasetWrappingStrategy()\nAbstract class for wrapping datasets for Chat Messages\n\n\n\nprompt_tokenizers.GPTeacherPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for GPTeacher prompts.\n\n\n\nprompt_tokenizers.InstructionPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for instruction-based prompts.\n\n\n\nprompt_tokenizers.InvalidDataException()\nException raised when the data is invalid\n\n\n\nprompt_tokenizers.JeopardyPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Jeopardy prompts.\n\n\n\nprompt_tokenizers.NomicGPT4AllPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for NomicGPT4All prompts.\n\n\n\nprompt_tokenizers.OpenAssistantPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for OpenAssistant prompts.\n\n\n\nprompt_tokenizers.PromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nAbstract class for tokenizing strategies\n\n\n\nprompt_tokenizers.ReflectionPromptTokenizingStrategy(\n prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for Reflection prompts.\n\n\n\nprompt_tokenizers.SummarizeTLDRPromptTokenizingStrategy(\n 
prompter,\n tokenizer,\n train_on_inputs=False,\n sequence_len=2048,\n)\nTokenizing strategy for SummarizeTLDR prompts." }, { "objectID": "docs/api/prompt_tokenizers.html#functions", @@ -3484,7 +3512,7 @@ "href": "docs/config.html", "title": "Config Reference", "section": "", - "text": "# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files\n# This can also be a relative path to a model on disk\nbase_model: ./llama-7b-hf\n# You can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, etc)\nbase_model_ignore_patterns:\n# If the base_model repo on hf hub doesn't include configuration .json files,\n# You can set that here, or leave this empty to default to base_model\nbase_model_config: ./llama-7b-hf\n# You can specify to choose a specific model revision from huggingface hub\nrevision_of_model:\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one defined in the base model\ntokenizer_config:\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too\nmodel_type: AutoModelForCausalLM\n# Corresponding tokenizer for the model AutoTokenizer is a good choice\ntokenizer_type: AutoTokenizer\n# Trust remote code for untrusted source\ntrust_remote_code:\n# use_fast option for tokenizer loading from_pretrained, default to True\ntokenizer_use_fast:\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy:\n# Resize the model embeddings when new tokens are added to multiples of 32\n# This is reported to improve training speed on some models\nresize_token_embeddings_to_32x:\n# Optional[bool] Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.\nshrink_embeddings:\n# Optional[bool] Don't upcast the embeddings to float32 when using PEFT. Useful for low-VRAM GPUs\nembeddings_skip_upcast:\n# Whether to load the model with randomly initialized weights. 
Useful for\n# pre-training a model from scratch or debugging purposes.\nrandom_init_weights:\n\n# (Internal use only)\n# Used to identify which the model is based on\nis_falcon_derived_model:\nis_llama_derived_model:\nis_qwen_derived_model:\n# Please note that if you set this to true, `padding_side` will be set to \"left\" by default\nis_mistral_derived_model:\n\n# optional overrides to the base model configuration\noverrides_of_model_config:\n # RoPE Scaling https://github.com/huggingface/transformers/pull/24653\n rope_scaling:\n type: # linear | dynamic\n factor: # float\n\n# optional overrides the base model loading from_pretrained\noverrides_of_model_kwargs:\n # use_cache: False\n\n# optional overrides to the bnb 4bit quantization configuration\n# https://huggingface.co/docs/transformers/main/main_classes/quantization#transformers.BitsAndBytesConfig\nbnb_config_kwargs:\n # These are default values\n llm_int8_has_fp16_weight: false\n bnb_4bit_quant_type: nf4\n bnb_4bit_use_double_quant: true\n\n# quantization aware training\nqat:\n activation_dtype: # Optional[str] = \"int8\". Fake quantization layout to use for activation quantization. Valid options are \"int4\" and \"int8\"\n weight_dtype: # Optional[str] = \"int8\". Fake quantization layout to use for weight quantization. Valid options are \"int4\" and \"int8\"\n group_size: # Optional[int] = 32. The number of elements in each group for per-group fake quantization\n fake_quant_after_n_steps: # Optional[int] = None. The number of steps to apply fake quantization after\n\n# post-training quantization\nquantization:\n weight_dtype: # Optional[str] = \"int8\". Fake quantization layout to use for weight quantization. Valid options are uintX for X in [1, 2, 3, 4, 5, 6, 7], or int4, or int8\n activation_dtype: # Optional[str] = \"int8\". Fake quantization layout to use for activation quantization. Valid options are \"int4\" and \"int8\"\n group_size: # Optional[int] = 32. 
The number of elements in each group for per-group fake quantization\n quantize_embedding: # Optional[bool] = False. Whether to quantize the embedding layer.\n\n\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: true\n\n# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer\nload_in_8bit: true\n# Use bitsandbytes 4 bit\nload_in_4bit:\n\n# Use CUDA bf16\nbf16: true # bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection. require >=ampere\n# Use CUDA fp16\nfp16: true\n# Use CUDA tf32\ntf32: true # require >=ampere\n# Note: if bf16 is set to 'auto', and fp16 is set to true, we will prefer the explict fp16 setting\n\n# No AMP (automatic mixed precision)\nbfloat16: true # require >=ampere\nfloat16: true\n\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in gigabytes); default: unset\ngpu_memory_limit: 20GiB\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge\nlora_on_cpu: true\n\n# List[str]. Add plugins to extend the pipeline.\n# See `src/axolotl/integrations` for the available plugins or doc below for more details.\n# https://docs.axolotl.ai/docs/custom_integrations.html\nplugins:\n # - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin\n\n# A list of one or more datasets to finetune the model with\n# See https://docs.axolotl.ai/docs/dataset_loading.html for guide on loading datasets\n# See https://docs.axolotl.ai/docs/dataset-formats/ for guide on dataset formats\ndatasets:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n - path: vicgalle/alpaca-gpt4\n # The type of prompt to use for training. 
[alpaca, gpteacher, oasst, reflection]\n type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn>\n ds_type: # Optional[str] (json|arrow|parquet|text|csv) defines the datatype when path is a file\n data_files: # Optional[str] path to source data files\n\n shards: # Optional[int] split dataset into N pieces (use with shards_idx)\n shards_idx: # Optional[int] = 0 the index of sharded dataset to use\n\n preprocess_shards: # Optional[int] process dataset in N sequential chunks for memory efficiency (exclusive with `shards`)\n\n name: # Optional[str] name of dataset configuration to load\n split: train # Optional[str] name of dataset split to load from\n revision: # Optional[str] The specific revision of the dataset to use when loading from the Hugging Face Hub. This can be a commit hash, tag, or branch name. If not specified, the latest version will be used. This parameter is ignored for local datasets.\n trust_remote_code: # Optional[bool] Trust remote code for untrusted source\n\n # Custom user instruction prompt\n - path: repo\n type:\n # The below are defaults. 
only set what's needed if you use a different column name.\n system_prompt: \"\"\n system_format: \"{system}\"\n field_system: system\n field_instruction: instruction\n field_input: input\n field_output: output\n\n # Customizable to be single line or multi-line\n # Use {instruction}/{input} as key to be replaced\n # 'format' can include {input}\n format: |-\n User: {instruction} {input}\n Assistant:\n # 'no_input_format' cannot include {input}\n no_input_format: \"{instruction} \"\n\n # For `completion` datsets only, uses the provided field instead of `text` column\n field:\n\n # Using chat template\n - path: ...\n # Set type to `chat_template` to use this strategy\n type: chat_template\n # Specify the name of the chat template to use\n # The name of the chat template to use for training, following values are supported:\n # - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default.\n # - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n # - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to if the tokenizer does not have a chat template else default to tokenizer. E.g. tokenizer_default_fallback_chatml.\n # - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n chat_template: tokenizer_default\n\n # Custom jinja chat template. 
Used only if `chat_template: jinja` or empty.\n chat_template_jinja:\n\n # Key containing the messages (default: \"messages\")\n field_messages: messages\n\n # Key containing the system message (default: \"system\")\n # If the system message is not present in the dataset sample, it will be loaded from the field_system property.\n field_system: system\n\n # Mapping of properties from the input dataset to the chat template.\n # (default: message_property_mappings={'role':'role', 'content':'content'})\n # If a property exists in the template but not in this mapping, the system will attempt\n # to load it directly from the message using the property name as the key.\n # Example: In the mapping below, 'from' is loaded from input dataset and used as 'role',\n # while 'value' is loaded and used as 'content' in the chat template.\n message_property_mappings:\n role: from\n content: value\n # ...\n\n # Optional[Dict[str, List]]. Roles mapping in the messages.\n # The format is {target_role: [source_roles]}. All source roles will be mapped to the target role.\n # The default is:\n roles:\n user: [\"human\", \"user\"]\n assistant: [\"gpt\", \"assistant\"]\n system: [\"system\"]\n tool: [\"tool\"]\n\n # Optional[bool]. Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If you wish to,\n # we recommend using a custom jinja template with the default system message removed or\n # adding a system turn with empty content.\n drop_system_message:\n\n # Optional[bool]. 
(for Qwen3 template only) Whether to split the assistant content based on a reasoning trace inside delimited tags\n # See example at `docs/dataset-formats/conversation.qmd`\n split_thinking:\n\n # IMPORTANT: The following fields determine which parts of the conversation to train on.\n # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train\n # See examples at `docs/dataset-formats/conversation.qmd`\n # Note: If the below 5 fields are empty, defaults to training only on the last message.\n\n # Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: [\"assistant\"] # default\n # Optional[str]. Which EOS tokens to train on in the conversation. Possible values are:\n # - all: train on all EOS tokens\n # - turn (default): train on the EOS token at the end of each trainable turn\n # - last: train on the last EOS token in the conversation\n # TIP: Please make sure that your `tokenizer.eos_token` is same as EOS/EOT token in template. Otherwise, set `eos_token` under `special_tokens`.\n train_on_eos: turn\n # Optional[str]. Which EOT (End-of-Turn) tokens to train on in the conversation. Possible values are:\n # - all: train on all EOT tokens\n # - turn: train on the EOT token at the end of each trainable turn\n # - last: train on the last EOT token in the conversation\n # If not specified, defaults to the value of train_on_eos for backward compatibility.\n train_on_eot:\n # The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`.\n message_field_training: training\n # The key in the message turn that contains the training details. 
Useful to selectively train on certain tokens in a turn.\n # The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train).\n message_field_training_detail: train_detail\n\n\n# If false, the datasets will not be shuffled and will keep their original order in `datasets`.\n# The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true.\nshuffle_merged_datasets: true\n\n# Deduplicates datasets and test_datasets with identical entries.\ndataset_exact_deduplication: true\n\n# A list of one or more datasets to eval the model with.\n# You can use either test_datasets, or val_set_size, but not both.\ntest_datasets:\n - path: /workspace/data/eval.jsonl\n ds_type: json\n # You need to specify a split. For \"json\" datasets the default split is called \"train\".\n split: train\n type: completion\n data_files:\n - /workspace/data/eval.jsonl\n\n# use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo'\nrl:\nrl_beta: # Optional[float]. The beta parameter for the RL training.\n\n# dpo\ndpo_use_weighting: # Optional[bool]. Whether to perform weighting.\nrpo_alpha: # Optional[float]. Weighting of NLL term in loss from RPO paper.\n\n# orpo\norpo_alpha: 0.1 # Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to `beta` in `ORPOConfig` due to trl mapping.\n\n# kto\nkto_desirable_weight: # Optional[float]. Factor for desirable loss term in KTO loss.\nkto_undesirable_weight: # Optional[float]. Factor for undesirable loss term in KTO loss.\n\n# simpo\ncpo_alpha: 1.0 # Weight of the BC regularizer\nsimpo_gamma: 0.5 # Target reward margin for the SimPO loss\n\n# grpo\ntrl:\n use_vllm: # Optional[bool]. Whether to use VLLM for RL training.\n vllm_server_host: # Optional[str]. Host of the vLLM server to connect to.\n vllm_server_port: # Optional[int]. 
Port of the vLLM server to connect to.\n vllm_server_timeout: # Optional[int]. Total timeout (in seconds) to wait for the vLLM server to respond.\n vllm_guided_decoding_regex: # Optional[str]. Regex for vLLM guided decoding.\n\n beta: # Optional[float]. Beta parameter for the RL training. Same as `rl_beta`. Use\n max_completion_length: # Optional[int]. Maximum length of the completion for RL training.\n\n reward_funcs: # Optional[list[str]]. List of reward functions to load. Paths must be importable from current dir.\n reward_weights: # Optional[list[float]]. List of reward weights for the reward functions.\n\n num_generations: # Optional[int]. Number of generations to sample.\n log_completions: # Optional[bool]. Whether to log completions.\n num_completions_to_print: # Optional[int]. Number of completions to print when log_completions is True.\n\n sync_ref_model: # Optional[bool]. Whether to sync the reference model.\n ref_model_mixup_alpha: # Optional[float]. Mixup alpha for the reference model.\n ref_model_sync_steps: # Optional[int]. Sync steps for the reference model.\n scale_rewards: # Optional[bool]. Whether to scale rewards by their standard deviation.\n\n temperature: # Optional[float]. Sampling temperature for the GRPO policy.\n top_p: # Optional[float]. Top-p sampling probability for the generation policy.\n top_k: # Optional[int]. Top-k sampling for the generation policy.\n min_p: # Optional[float]. Minimum probability for the generation policy.\n repetition_penalty: # Optional[float]. Penalty for tokens that appear in prompt and generated text.\n\n num_iterations: # Optional[int]. Number of iterations per batch (μ) for GRPO.\n epsilon: # Optional[float]. Epsilon value for clipping in the GRPO algorithm.\n epsilon_high: # Optional[float]. Upper-bound epsilon value for clipping in the GRPO algorithm.\n use_liger_loss: # Optional[bool]. Whether to use Liger loss for GRPO.\n loss_type: # Optional[str]. Loss formulation to use. 
Supported values: grpo, bnpo, dr_grpo.\n mask_truncated_completions: # Optional[bool]. Whether to exclude truncated completions from loss calculation.\n\n\n# reward modelling: `True` or `False`\nreward_model:\n\n# process reward modelling: `True` or `False`\nprocess_reward_model:\n\n# The name of the chat template to use for training, following values are supported:\n# - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default value.\n# - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n# - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not available in the tokenizer.\n# - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n# The selected chat template will be saved to the tokenizer_config.json for easier inferencing\n# Note: It is recommended to set train_on_inputs to true when using a chat template that is different from the model's default chat template.\nchat_template: tokenizer_default\n# custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null.\nchat_template_jinja: null\n# Optional[List[str]]. 
Custom EOT (End-of-Turn) tokens to mask/unmask during training.\n# These tokens mark the boundaries between conversation turns.\n# For example: [\"/INST\", \"</s>\", \"[/SYSTEM_PROMPT]\"]\n# If not specified, defaults to just the model's eos_token.\n# This is useful for templates that use multiple delimiter tokens.\neot_tokens:\n # - \"</s>\"\n # - \"[/INST]\"\n # - \"[/SYSTEM_PROMPT]\"\n# Changes the default system message\ndefault_system_message: You are a helpful assistant. Please give a long and detailed answer. # Currently only supports chatml.\n# Axolotl attempts to save the dataset as an arrow after packing the data together so\n# subsequent training attempts load faster, relative path\ndataset_prepared_path: data/last_run_prepared\n# Push prepared dataset to hub\npush_dataset_to_hub: # Optional[str] repo_org/repo_name\n# The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()`\n# if not set.\ndataset_processes: # defaults to os.cpu_count() if not set\n# Keep dataset in memory while preprocessing\n# Only needed if cached dataset is taking too much storage\ndataset_keep_in_memory:\n# push checkpoints to hub\nhub_model_id: # private repo path to push finetuned model\n# how to push checkpoints to hub\n# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy\nhub_strategy:\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets\n# Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: # boolean\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 
0 for no eval.\nval_set_size: 0.04\n# Num shards for whole dataset\ndataset_shard_num:\n# Index of shard to use for whole dataset\ndataset_shard_idx:\n\n# The maximum length of an input to train with, this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: 2048\n# Pad inputs so each step uses constant sized buffers\n# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently\npad_to_sequence_len:\n# Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true'\nsample_packing:\n# Set to 'false' if getting errors during eval with sample_packing on.\neval_sample_packing:\n# You can set these packing optimizations AFTER starting a training at least once.\n# The trainer will provide recommended values for these values.\nsample_packing_eff_est:\ntotal_num_tokens:\n# Increasing the following values helps with packing, but usually only slightly (<%1.)\n# The number of samples packed at a time.\nsample_packing_group_size: 100000\n# The number of samples which can be packed into one sequence. Increase if using a large sequence_len with many short samples.\nsample_packing_bin_size: 200\nsample_pack_sequentially: # Optional[bool]. Whether to pack samples sequentially.\n\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation:\n\ncurriculum_sampling: # Optional[bool]. Whether to use sequential sampling for curriculum learning\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening:\n\n# Passed through to transformers when loading the model when launched without accelerate\n# Use `sequential` when training w/ model parallelism to limit memory\ndevice_map:\n# Defines the max memory usage per gpu on the system. 
Passed through to transformers when loading the model.\nmax_memory:\n\n# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model\nadapter: lora\n# If you already have a lora model trained that you want to load, put that here.\n# This means after training, if you want to test the model, you should set this to the value of `output_dir`.\n# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir:\n\n# LoRA hyperparameters\n# For more details about the following options, see:\n# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2\nlora_r: 8\nlora_alpha: 16\nlora_dropout: 0.05\nlora_target_modules:\n - q_proj\n - v_proj\n# - k_proj\n# - o_proj\n# - gate_proj\n# - down_proj\n# - up_proj\nlora_target_linear: # If true, will target all linear modules\n\n# List[int] | int. # The layer indices to transform, otherwise, apply to all layers\n# https://huggingface.co/docs/peft/v0.15.0/en/package_reference/lora#peft.LoraConfig.layers_to_transform\npeft_layers_to_transform:\n\n# Optional[bool]. Whether to use DoRA.\n# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#weight-decomposed-low-rank-adaptation-dora\npeft_use_dora:\n\n# Optional[bool]. Whether to use RSLoRA.\n# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#rank-stabilized-lora\npeft_use_rslora:\n\n# Optional[list[tuple[int, int]]]. List of layer indices to replicate.\n# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#memory-efficient-layer-replication-with-lora\npeft_layer_replication:\n\n# bool | Literal[\"gaussian\", \"eva\", \"olora\", \"pissa\", \"pissa_niter_[number of iters]\", \"corda\", \"loftq\"]\n# How to initialize LoRA weights. 
Default to True which is MS original implementation.\n# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#initialization\npeft_init_lora_weights:\n\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens.\n# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models.\n# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\n# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994\nlora_modules_to_save:\n# - embed_tokens\n# - lm_head\n\nlora_fan_in_fan_out: false\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for\n# speed and memory savings\n# See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_mlp_kernel: true\nlora_qkv_kernel: true\nlora_o_kernel: true\n\n# LoRA+ hyperparameters\n# For more details about the following options, see:\n# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/train_builder.py`\nloraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.\nloraplus_lr_embedding: # loraplus learning rate for lora embedding layers. 
Default value is 1e-6.\n\npeft:\n # Configuration options for loftq initialization for LoRA\n # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization\n loftq_config:\n loftq_bits: # typically 4 bits\n\n# ReLoRA configuration\n# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed\nrelora_steps: # Number of steps per ReLoRA restart\nrelora_warmup_steps: # Number of per-restart warmup steps\nrelora_anneal_steps: # Number of anneal steps for each relora cycle\nrelora_prune_ratio: # threshold for optimizer magnitude when pruning\nrelora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings\n\n# wandb configuration if you're using it\n# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`.\nwandb_mode: # \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn off wandb\nwandb_project: # Your wandb project name\nwandb_entity: # A wandb Team name if using a Team\nwandb_watch:\nwandb_name: # Set the name of your wandb run\nwandb_run_id: # Set the ID of your wandb run\nwandb_log_model: # \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only at the end of training\n\n# mlflow configuration if you're using it\nmlflow_tracking_uri: # URI to mlflow\nmlflow_experiment_name: # Your experiment name\nmlflow_run_name: # Your run name\nhf_mlflow_log_artifacts: # set to true to copy each saved checkpoint on each save to mlflow artifact registry\n\n# Comet configuration if you're using it\n# Make sure your `COMET_API_KEY` environment variable is set (recommended) or you login to Comet with `comet login`.\n# Check out our documentation for more details https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start\nuse_comet: # Enable or disable Comet integration.\ncomet_api_key: # API key for Comet. 
Recommended to set via `comet login`.\ncomet_workspace: # Workspace name in Comet. Defaults to the user's default workspace.\ncomet_project_name: # Project name in Comet. Defaults to Uncategorized.\ncomet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key.\ncomet_mode: # Create a new experiment (\"create\") or log to an existing one (\"get\"). Default (\"get_or_create\") auto-selects based on configuration.\ncomet_online: # Set to True to log data to Comet server, or False for offline storage. Default is True.\ncomet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details.\n\n# Tensorboard\nuse_tensorboard: # Optional[bool]\n\n# Where to save the full-finetuned model to\noutput_dir: ./completed-model\n\n# Whether to use torch.compile and which backend to use\n# setting to `auto` will enable torch compile when torch>=2.5.1\ntorch_compile: # Optional[Union[Literal[\"auto\"], bool]]\ntorch_compile_backend: # Optional[str]\n\n# Training hyperparameters\n\n# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps.\ngradient_accumulation_steps: 1\n# The number of samples to include in each batch. This is the number of samples sent to each GPU.\n# Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: 2\neval_batch_size:\nnum_epochs: 4\nwarmup_steps: 100 # cannot use with warmup_ratio\nwarmup_ratio: 0.05 # cannot use with warmup_steps\nlearning_rate: 0.00003\nlr_quadratic_warmup:\nlogging_steps:\neval_steps: # Leave empty to eval at each epoch, integer for every N steps. 
float for fraction of total steps\nevals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps\neval_strategy: # Set to `\"no\"` to skip evaluation, `\"epoch\"` at end of each epoch, leave empty to infer from `eval_steps`.\nsave_strategy: # Set to `\"no\"` to skip checkpoint saves, `\"epoch\"` at end of each epoch, `\"best\"` when better result is achieved, leave empty to infer from `save_steps`.\nsave_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps\nsaves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsave_total_limit: # Checkpoints saved at a time\nsave_only_model: # Save only the model weights, skipping the optimizer. Using this means you can't resume from checkpoints.\n# Maximum number of iterations to train for. It precedes num_epochs which means that\n# if both are set, num_epochs will not be guaranteed.\n# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps:\n\n# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time.\ninclude_tokens_per_second: # Optional[bool]\n\n# whether to find batch size that fits in memory. Passed to underlying transformers Trainer\nauto_find_batch_size: # Optional[bool]\n\neval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0\neval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128\ndo_causal_lm_eval: # Whether to run causal language model evaluation for metrics in `eval_causal_lm_metrics`.\neval_causal_lm_metrics: # HF evaluate metrics used during evaluation. 
Default is [\"sacrebleu\", \"comet\", \"ter\", \"chrf\", \"perplexity\"]\n\nprofiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir.\n # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information\n # snapshots can be visualized @ https://pytorch.org/memory_viz\n\nloss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training)\nloss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3)\n\n# Save model as safetensors (require safetensors package)\nsave_safetensors:\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: false\n# Group similarly sized data to minimize padding.\n# May be slower to start, as it must download and sort the entire dataset.\n# Note that training loss may have an oscillating pattern with this enabled.\ngroup_by_length: false\n\n# Whether to use gradient checkpointing. 
Available options are: true, false, \"offload\", \"offload_disk\".\n# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: false\n# additional kwargs to pass to the trainer for gradient checkpointing\n# gradient_checkpointing_kwargs:\n# use_reentrant: true\n\n# Stop training after this many evaluation losses have increased in a row\n# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback\nearly_stopping_patience: 3\n\n# Specify a scheduler and kwargs to use with the optimizer\n# Valid values are driven by the Transformers SchedulerType class, see:\n# https://github.com/huggingface/transformers/blob/5f4ecf2d9f867a1255131d2461d75793c0cf1db2/src/transformers/trainer_utils.py#L420\n# Valid values include\n# - 'linear'\n# - 'cosine' (default)\n# - 'cosine_with_restarts'\n# - 'polynomial'\n# - 'constant'\n# - 'constant_with_warmup'\n# - 'inverse_sqrt'\n# - 'reduce_lr_on_plateau'\n# - 'cosine_with_min_lr'\n# - 'warmup_stable_decay'\n\n# Additional schedulers include:\n# - 'one_cycle'\n# - 'rex'\nlr_scheduler:\nlr_scheduler_kwargs:\ncosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr\ncosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)\n\n# For one_cycle optim\nlr_div_factor: # Learning rate div factor\n\n# Specify optimizer\n# Valid values are driven by the Transformers OptimizerNames class, see:\n# https://github.com/huggingface/transformers/blob/cbf924b76c03828101a34069a96d209314114fd5/src/transformers/training_args.py#L144-L189\n#\n# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of\n# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. 
When in doubt, it is recommended to start with the optimizer used\n# in the examples/ for your model and fine-tuning use case.\n#\n# Valid values for 'optimizer' include:\n# - adamw_torch\n# - adamw_torch_fused (default)\n# - adamw_torch_xla\n# - adamw_torch_npu_fused\n# - adamw_apex_fused\n# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1)\n# - adafactor\n# - adamw_anyprecision\n# - adamw_torch_4bit\n# - ademamix\n# - sgd\n# - adagrad\n# - adamw_bnb_8bit\n# - adamw_8bit # alias for adamw_bnb_8bit\n# - ademamix_8bit\n# - lion_8bit\n# - lion_32bit\n# - paged_adamw_32bit\n# - paged_adamw_8bit\n# - paged_ademamix_32bit\n# - paged_ademamix_8bit\n# - paged_lion_32bit\n# - paged_lion_8bit\n# - rmsprop\n# - rmsprop_bnb\n# - rmsprop_bnb_8bit\n# - rmsprop_bnb_32bit\n# - galore_adamw\n# - galore_adamw_8bit\n# - galore_adafactor\n# - galore_adamw_layerwise\n# - galore_adamw_8bit_layerwise\n# - galore_adafactor_layerwise\n# - lomo\n# - adalomo\n# - grokadamw\n# - schedule_free_adamw\n# - schedule_free_sgd\n# - apollo_adamw\n# - apollo_adamw_layerwise\n#\n# Additional custom optimizers include:\n# - optimi_adamw\n# - ao_adamw_8bit\n# - ao_adamw_fp8\n# - came_pytorch\noptimizer:\n# Dictionary of arguments to pass to the optimizer\noptim_args:\n# For Galore Optimizers the following optim_args are available\n# rank: # type: int\n# update_proj_gap # type: int\n# scale # type: float\n# proj_type: # type: str, default = std\n\n# The target modules to optimize, i.e. 
the module names that you would like to train, right now this is used only for GaLore algorithm\noptim_target_modules:\n# - self_attn # for llama\n# - mlp\n\n# Specify weight decay\nweight_decay:\n# adamw hyperparams\nadam_beta1:\nadam_beta2:\nadam_beta3: # only used for CAME Optimizer\nadam_epsilon:\nadam_epsilon2: # only used for CAME Optimizer\n# Gradient clipping max norm\nmax_grad_norm:\n\n# Augmentation techniques\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings\n# currently only supported on Llama and Mistral\nneftune_noise_alpha:\n\n# Optional[bool]. Whether to bettertransformers\nflash_optimum:\n\n# Note: Only one of the following attention patches can be used at a time.\n# For example, if you set `xformers_attention` to `true`, do not set `flash_attention` to `true`.\n\n# Optional[bool]. Whether to use xformers attention patch https://github.com/facebookresearch/xformers:\nxformers_attention:\n# Optional[bool]. Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:\nflash_attention:\nflash_attn_cross_entropy: # Optional[bool]. Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_rms_norm: # Optional[bool]. Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_fuse_qkv: # Optional[bool]. Whether to fuse QKV into a single operation\nflash_attn_fuse_mlp: # Optional[bool]. Whether to fuse part of the MLP into a single operation\n# Optional[bool]. Whether to use scaled-dot-product attention\n# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention:\n# Optional[bool]. Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention:\n\n# Optional[bool]. Whether to use low_cpu_mem_usage\nlow_cpu_mem_usage:\n# Optional[str]. Resume from a specific checkpoint dir\nresume_from_checkpoint:\n# Optional[bool]. 
If resume_from_checkpoint isn't set and you simply want it to start where it left off.\n# Be careful with this being turned on between different models.\nauto_resume_from_checkpoints: false\n\n## Multimodal section\n# int | tuple[int, int] | None . Size to resize images to, width x height.\n# Will read from model/processor config if not set.\nimage_size:\n# str. Algorithm to use for image resizing. \"bilinear\", \"bicubic\", \"lanczos\". Default is \"bilinear\".\nimage_resize_algorithm: 'bilinear'\n## End of multimodal section\n\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank:\n\n# Add or change special tokens.\n# If you add tokens here, you don't need to add them to the `tokens` list.\nspecial_tokens:\n # bos_token: \"<s>\"\n # eos_token: \"</s>\"\n # unk_token: \"<unk>\"\n # pad_token: \"[PAD]\"\n\n# Optional[list[str]]. Add extra tokens to the tokenizer.\ntokens:\n # - \"<|startoftext|>\"\n # - \"<|endoftext|>\"\n\n# Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer.\n# Only works for tokens that are not part of the base vocab (aka are added_tokens).\n# Can be checked if they exist in tokenizer.json added_tokens.\nadded_tokens_overrides: # Dict[int, str]\n# 128041: \"<|im_start|>\"\n# 128042: \"<|im_end|>\"\n\n# FSDP\nfsdp:\nfsdp_config:\n\n# Deepspeed config path. 
e.g., deepspeed_configs/zero3.json\ndeepspeed:\n\n# Advanced DDP Arguments\nddp_timeout:\nddp_bucket_cap_mb:\nddp_broadcast_buffers:\n\n# Sequence parallelism\n# Set to a divisor of the number of GPUs available to split sequences into chunks of equal size.\n# Use in long context training to prevent OOM when sequences cannot fit into a single GPU's VRAM.\n# E.g., if 4 GPUs are available, set this value to 2 to split each sequence into two equal-sized\n# subsequences, or set to 4 to split into four equal-sized subsequences.\n# See https://docs.axolotl.ai/docs/sequence_parallelism.html for more details.\nsequence_parallel_degree:\n# Optional; strides across the key dimension. Larger values use more memory but should make training faster.\n# Must evenly divide the number of KV heads in your model.\nheads_k_stride: 1\n# One of \"varlen_llama3\", \"batch_ring\", \"batch_zigzag\", \"batch_stripe\". Defaults to \"varlen_llama3\"\n# in the sample packing case, and \"batch_ring\" in the non-sample packing case.\nring_attn_func:\n\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path:\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize\npretraining_dataset:\n\n# Debug mode\ndebug:\n\n# Seed\nseed:\n\n# Allow overwrite yml config using from cli\nstrict:", + "text": "# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files\n# This can also be a relative path to a model on disk\nbase_model: ./llama-7b-hf\n# You can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, etc)\nbase_model_ignore_patterns:\n# If the base_model repo on hf hub doesn't include configuration .json files,\n# You can set that here, or leave this empty to default to base_model\nbase_model_config: ./llama-7b-hf\n# You can specify to choose a specific model revision from huggingface hub\nrevision_of_model:\n# Optional tokenizer configuration path in case you want to use a different tokenizer\n# than the one 
defined in the base model\ntokenizer_config:\n# If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too\nmodel_type: AutoModelForCausalLM\n# Corresponding tokenizer for the model AutoTokenizer is a good choice\ntokenizer_type: AutoTokenizer\n# Trust remote code for untrusted source\ntrust_remote_code:\n# use_fast option for tokenizer loading from_pretrained, default to True\ntokenizer_use_fast:\n# Whether to use the legacy tokenizer setting, defaults to True\ntokenizer_legacy:\n# Resize the model embeddings when new tokens are added to multiples of 32\n# This is reported to improve training speed on some models\nresize_token_embeddings_to_32x:\n# Optional[bool] Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.\nshrink_embeddings:\n# Optional[bool] Don't upcast the embeddings to float32 when using PEFT. Useful for low-VRAM GPUs\nembeddings_skip_upcast:\n# Whether to load the model with randomly initialized weights. Useful for\n# pre-training a model from scratch or debugging purposes.\nrandom_init_weights:\n\n# (Internal use only)\n# Used to identify which the model is based on\nis_falcon_derived_model:\nis_llama_derived_model:\nis_qwen_derived_model:\n# Please note that if you set this to true, `padding_side` will be set to \"left\" by default\nis_mistral_derived_model:\n\n# optional overrides to the base model configuration\noverrides_of_model_config:\n # RoPE Scaling https://github.com/huggingface/transformers/pull/24653\n rope_scaling:\n type: # linear | dynamic\n factor: # float\n\n# optional overrides the base model loading from_pretrained\noverrides_of_model_kwargs:\n # use_cache: False\n\n# optional overrides to the bnb 4bit quantization configuration\n# https://huggingface.co/docs/transformers/main/main_classes/quantization#transformers.BitsAndBytesConfig\nbnb_config_kwargs:\n # These are default values\n llm_int8_has_fp16_weight: false\n bnb_4bit_quant_type: nf4\n bnb_4bit_use_double_quant: 
true\n\n# quantization aware training\nqat:\n activation_dtype: # Optional[str] = \"int8\". Fake quantization layout to use for activation quantization. Valid options are \"int4\" and \"int8\"\n weight_dtype: # Optional[str] = \"int8\". Fake quantization layout to use for weight quantization. Valid options are \"int4\" and \"int8\"\n group_size: # Optional[int] = 32. The number of elements in each group for per-group fake quantization\n fake_quant_after_n_steps: # Optional[int] = None. The number of steps to apply fake quantization after\n\n# post-training quantization\nquantization:\n weight_dtype: # Optional[str] = \"int8\". Fake quantization layout to use for weight quantization. Valid options are uintX for X in [1, 2, 3, 4, 5, 6, 7], or int4, or int8\n activation_dtype: # Optional[str] = \"int8\". Fake quantization layout to use for activation quantization. Valid options are \"int4\" and \"int8\"\n group_size: # Optional[int] = 32. The number of elements in each group for per-group fake quantization\n quantize_embedding: # Optional[bool] = False. Whether to quantize the embedding layer.\n\n\n# Whether you are training a 4-bit GPTQ quantized model\ngptq: true\n\n# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer\nload_in_8bit: true\n# Use bitsandbytes 4 bit\nload_in_4bit:\n\n# Use CUDA bf16\nbf16: true # bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection. 
require >=ampere\n# Use CUDA fp16\nfp16: true\n# Use CUDA tf32\ntf32: true # require >=ampere\n# Note: if bf16 is set to 'auto', and fp16 is set to true, we will prefer the explict fp16 setting\n\n# No AMP (automatic mixed precision)\nbfloat16: true # require >=ampere\nfloat16: true\n\n# Limit the memory for all available GPUs to this amount (if an integer, expressed in gigabytes); default: unset\ngpu_memory_limit: 20GiB\n# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge\nlora_on_cpu: true\n\n# List[str]. Add plugins to extend the pipeline.\n# See `src/axolotl/integrations` for the available plugins or doc below for more details.\n# https://docs.axolotl.ai/docs/custom_integrations.html\nplugins:\n # - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin\n\n# A list of one or more datasets to finetune the model with\n# See https://docs.axolotl.ai/docs/dataset_loading.html for guide on loading datasets\n# See https://docs.axolotl.ai/docs/dataset-formats/ for guide on dataset formats\ndatasets:\n # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory\n - path: vicgalle/alpaca-gpt4\n # The type of prompt to use for training. 
[alpaca, gpteacher, oasst, reflection]\n type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn>\n ds_type: # Optional[str] (json|arrow|parquet|text|csv) defines the datatype when path is a file\n data_files: # Optional[str] path to source data files\n\n shards: # Optional[int] split dataset into N pieces (use with shards_idx)\n shards_idx: # Optional[int] = 0 the index of sharded dataset to use\n\n preprocess_shards: # Optional[int] process dataset in N sequential chunks for memory efficiency (exclusive with `shards`)\n\n name: # Optional[str] name of dataset configuration to load\n split: train # Optional[str] name of dataset split to load from\n revision: # Optional[str] The specific revision of the dataset to use when loading from the Hugging Face Hub. This can be a commit hash, tag, or branch name. If not specified, the latest version will be used. This parameter is ignored for local datasets.\n trust_remote_code: # Optional[bool] Trust remote code for untrusted source\n\n # Custom user instruction prompt\n - path: repo\n type:\n # The below are defaults. 
only set what's needed if you use a different column name.\n system_prompt: \"\"\n system_format: \"{system}\"\n field_system: system\n field_instruction: instruction\n field_input: input\n field_output: output\n\n # Customizable to be single line or multi-line\n # Use {instruction}/{input} as key to be replaced\n # 'format' can include {input}\n format: |-\n User: {instruction} {input}\n Assistant:\n # 'no_input_format' cannot include {input}\n no_input_format: \"{instruction} \"\n\n # For `completion` datsets only, uses the provided field instead of `text` column\n field:\n\n # Using chat template\n - path: ...\n # Set type to `chat_template` to use this strategy\n type: chat_template\n # Specify the name of the chat template to use\n # The name of the chat template to use for training, following values are supported:\n # - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default.\n # - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n # - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to if the tokenizer does not have a chat template else default to tokenizer. E.g. tokenizer_default_fallback_chatml.\n # - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n chat_template: tokenizer_default\n\n # Custom jinja chat template. 
Used only if `chat_template: jinja` or empty.\n chat_template_jinja:\n\n # Key containing the messages (default: \"messages\")\n field_messages: messages\n\n # Key containing the system message (default: \"system\")\n # If the system message is not present in the dataset sample, it will be loaded from the field_system property.\n field_system: system\n\n # Mapping of properties from the input dataset to the chat template.\n # (default: message_property_mappings={'role':'role', 'content':'content'})\n # If a property exists in the template but not in this mapping, the system will attempt\n # to load it directly from the message using the property name as the key.\n # Example: In the mapping below, 'from' is loaded from input dataset and used as 'role',\n # while 'value' is loaded and used as 'content' in the chat template.\n message_property_mappings:\n role: from\n content: value\n # ...\n\n # Optional[Dict[str, List]]. Roles mapping in the messages.\n # The format is {target_role: [source_roles]}. All source roles will be mapped to the target role.\n # The default is:\n roles:\n user: [\"human\", \"user\"]\n assistant: [\"gpt\", \"assistant\"]\n system: [\"system\"]\n tool: [\"tool\"]\n\n # Optional[bool]. Whether to drop the system turn from the dataset. Only works with chat_template.\n # This does not drop the default system message from chat_template if it exists. If you wish to,\n # we recommend using a custom jinja template with the default system message removed or\n # adding a system turn with empty content.\n drop_system_message:\n\n # Optional[bool]. 
(for Qwen3 template only) Whether to split the assistant content based on a reasoning trace inside delimited tags\n # See example at `docs/dataset-formats/conversation.qmd`\n split_thinking:\n\n # IMPORTANT: The following fields determine which parts of the conversation to train on.\n # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train\n # See examples at `docs/dataset-formats/conversation.qmd`\n # Note: If the below 5 fields are empty, defaults to training only on the last message.\n\n # Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss.\n roles_to_train: [\"assistant\"] # default\n # Optional[str]. Which EOS tokens to train on in the conversation. Possible values are:\n # - all: train on all EOS tokens\n # - turn (default): train on the EOS token at the end of each trainable turn\n # - last: train on the last EOS token in the conversation\n # TIP: Please make sure that your `tokenizer.eos_token` is same as EOS/EOT token in template. Otherwise, set `eos_token` under `special_tokens`.\n train_on_eos: turn\n # Optional[str]. Which EOT (End-of-Turn) tokens to train on in the conversation. Possible values are:\n # - all: train on all EOT tokens\n # - turn: train on the EOT token at the end of each trainable turn\n # - last: train on the last EOT token in the conversation\n # If not specified, defaults to the value of train_on_eos for backward compatibility.\n train_on_eot:\n # The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`.\n message_field_training: training\n # The key in the message turn that contains the training details. 
Useful to selectively train on certain tokens in a turn.\n # The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train).\n message_field_training_detail: train_detail\n\n\n# If false, the datasets will not be shuffled and will keep their original order in `datasets`.\n# The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true.\nshuffle_merged_datasets: true\n\n# Deduplicates datasets and test_datasets with identical entries.\ndataset_exact_deduplication: true\n\n# A list of one or more datasets to eval the model with.\n# You can use either test_datasets, or val_set_size, but not both.\ntest_datasets:\n - path: /workspace/data/eval.jsonl\n ds_type: json\n # You need to specify a split. For \"json\" datasets the default split is called \"train\".\n split: train\n type: completion\n data_files:\n - /workspace/data/eval.jsonl\n\n# use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo'\nrl:\nrl_beta: # Optional[float]. The beta parameter for the RL training.\n\n# dpo\ndpo_use_weighting: # Optional[bool]. Whether to perform weighting.\nrpo_alpha: # Optional[float]. Weighting of NLL term in loss from RPO paper.\n\n# orpo\norpo_alpha: 0.1 # Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to `beta` in `ORPOConfig` due to trl mapping.\n\n# kto\nkto_desirable_weight: # Optional[float]. Factor for desirable loss term in KTO loss.\nkto_undesirable_weight: # Optional[float]. Factor for undesirable loss term in KTO loss.\n\n# simpo\ncpo_alpha: 1.0 # Weight of the BC regularizer\nsimpo_gamma: 0.5 # Target reward margin for the SimPO loss\n\n# grpo\ntrl:\n use_vllm: # Optional[bool]. Whether to use VLLM for RL training.\n vllm_server_host: # Optional[str]. Host of the vLLM server to connect to.\n vllm_server_port: # Optional[int]. 
Port of the vLLM server to connect to.\n vllm_server_timeout: # Optional[int]. Total timeout (in seconds) to wait for the vLLM server to respond.\n vllm_guided_decoding_regex: # Optional[str]. Regex for vLLM guided decoding.\n\n beta: # Optional[float]. Beta parameter for the RL training. Same as `rl_beta`. Use\n max_completion_length: # Optional[int]. Maximum length of the completion for RL training.\n\n reward_funcs: # Optional[list[str]]. List of reward functions to load. Paths must be importable from current dir.\n reward_weights: # Optional[list[float]]. List of reward weights for the reward functions.\n\n num_generations: # Optional[int]. Number of generations to sample.\n log_completions: # Optional[bool]. Whether to log completions.\n num_completions_to_print: # Optional[int]. Number of completions to print when log_completions is True.\n\n sync_ref_model: # Optional[bool]. Whether to sync the reference model.\n ref_model_mixup_alpha: # Optional[float]. Mixup alpha for the reference model.\n ref_model_sync_steps: # Optional[int]. Sync steps for the reference model.\n scale_rewards: # Optional[bool]. Whether to scale rewards by their standard deviation.\n\n temperature: # Optional[float]. Sampling temperature for the GRPO policy.\n top_p: # Optional[float]. Top-p sampling probability for the generation policy.\n top_k: # Optional[int]. Top-k sampling for the generation policy.\n min_p: # Optional[float]. Minimum probability for the generation policy.\n repetition_penalty: # Optional[float]. Penalty for tokens that appear in prompt and generated text.\n\n num_iterations: # Optional[int]. Number of iterations per batch (μ) for GRPO.\n epsilon: # Optional[float]. Epsilon value for clipping in the GRPO algorithm.\n epsilon_high: # Optional[float]. Upper-bound epsilon value for clipping in the GRPO algorithm.\n use_liger_loss: # Optional[bool]. Whether to use Liger loss for GRPO.\n loss_type: # Optional[str]. Loss formulation to use. 
Supported values: grpo, bnpo, dr_grpo.\n mask_truncated_completions: # Optional[bool]. Whether to exclude truncated completions from loss calculation.\n\n\n# reward modelling: `True` or `False`\nreward_model:\n\n# process reward modelling: `True` or `False`\nprocess_reward_model:\n\n# The name of the chat template to use for training, following values are supported:\n# - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default value.\n# - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py\n# - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not available in the tokenizer.\n# - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.\n# The selected chat template will be saved to the tokenizer_config.json for easier inferencing\n# Note: It is recommended to set train_on_inputs to true when using a chat template that is different from the model's default chat template.\nchat_template: tokenizer_default\n# custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null.\nchat_template_jinja: null\n# Optional[List[str]]. 
Custom EOT (End-of-Turn) tokens to mask/unmask during training.\n# These tokens mark the boundaries between conversation turns.\n# For example: [\"/INST\", \"</s>\", \"[/SYSTEM_PROMPT]\"]\n# If not specified, defaults to just the model's eos_token.\n# This is useful for templates that use multiple delimiter tokens.\neot_tokens:\n # - \"</s>\"\n # - \"[/INST]\"\n # - \"[/SYSTEM_PROMPT]\"\n# Changes the default system message\ndefault_system_message: You are a helpful assistant. Please give a long and detailed answer. # Currently only supports chatml.\n# Axolotl attempts to save the dataset as an arrow after packing the data together so\n# subsequent training attempts load faster, relative path\ndataset_prepared_path: data/last_run_prepared\n# Push prepared dataset to hub\npush_dataset_to_hub: # Optional[str] repo_org/repo_name\n# The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()`\n# if not set.\ndataset_processes: # defaults to os.cpu_count() if not set\n# Keep dataset in memory while preprocessing\n# Only needed if cached dataset is taking too much storage\ndataset_keep_in_memory:\n# push checkpoints to hub\nhub_model_id: # private repo path to push finetuned model\n# how to push checkpoints to hub\n# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy\nhub_strategy:\n# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets\n# Required to be true when used in combination with `push_dataset_to_hub`\nhf_use_auth_token: # boolean\n# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 
0 for no eval.\nval_set_size: 0.04\n# Num shards for whole dataset\ndataset_shard_num:\n# Index of shard to use for whole dataset\ndataset_shard_idx:\n\n# The maximum length of an input to train with, this should typically be less than 2048\n# as most models have a token/context limit of 2048\nsequence_len: 2048\n# Pad inputs so each step uses constant sized buffers\n# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently\npad_to_sequence_len:\n# Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true'\nsample_packing:\n# Set to 'false' if getting errors during eval with sample_packing on.\neval_sample_packing:\n# You can set these packing optimizations AFTER starting a training at least once.\n# The trainer will provide recommended values for these values.\nsample_packing_eff_est:\ntotal_num_tokens:\n# Increasing the following values helps with packing, but usually only slightly (<%1.)\n# The number of samples packed at a time.\nsample_packing_group_size: 100000\n# The number of samples which can be packed into one sequence. Increase if using a large sequence_len with many short samples.\nsample_packing_bin_size: 200\nsample_pack_sequentially: # Optional[bool]. Whether to pack samples sequentially.\n\n# whether to concatenate samples during pretraining\npretraining_sample_concatenation:\n\ncurriculum_sampling: # Optional[bool]. Whether to use sequential sampling for curriculum learning\n\n# Use batch flattening for speedups when not using sample_packing\nbatch_flattening:\n\n# Passed through to transformers when loading the model when launched without accelerate\n# Use `sequential` when training w/ model parallelism to limit memory\ndevice_map:\n# Defines the max memory usage per gpu on the system. 
Passed through to transformers when loading the model.\nmax_memory:\n\n# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model\nadapter: lora\n# If you already have a lora model trained that you want to load, put that here.\n# This means after training, if you want to test the model, you should set this to the value of `output_dir`.\n# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`.\nlora_model_dir:\n\n# LoRA hyperparameters\n# For more details about the following options, see:\n# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2\nlora_r: 8\nlora_alpha: 16\nlora_dropout: 0.05\nlora_target_modules:\n - q_proj\n - v_proj\n# - k_proj\n# - o_proj\n# - gate_proj\n# - down_proj\n# - up_proj\nlora_target_linear: # If true, will target all linear modules\n\n# List[int] | int. # The layer indices to transform, otherwise, apply to all layers\n# https://huggingface.co/docs/peft/v0.15.0/en/package_reference/lora#peft.LoraConfig.layers_to_transform\npeft_layers_to_transform:\n\n# Optional[bool]. Whether to use DoRA.\n# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#weight-decomposed-low-rank-adaptation-dora\npeft_use_dora:\n\n# Optional[bool]. Whether to use RSLoRA.\n# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#rank-stabilized-lora\npeft_use_rslora:\n\n# Optional[list[tuple[int, int]]]. List of layer indices to replicate.\n# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#memory-efficient-layer-replication-with-lora\npeft_layer_replication:\n\n# bool | Literal[\"gaussian\", \"eva\", \"olora\", \"pissa\", \"pissa_niter_[number of iters]\", \"corda\", \"loftq\"]\n# How to initialize LoRA weights. 
Defaults to True, which is the original Microsoft implementation.\n# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#initialization\npeft_init_lora_weights:\n\n# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens.\n# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models.\n# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities.\n# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994\nlora_modules_to_save:\n# - embed_tokens\n# - lm_head\n\nlora_fan_in_fan_out: false\n\n# Apply custom LoRA autograd functions and activation function Triton kernels for\n# speed and memory savings\n# See: https://docs.axolotl.ai/docs/lora_optims.html\nlora_mlp_kernel: true\nlora_qkv_kernel: true\nlora_o_kernel: true\n\n# LoRA+ hyperparameters\n# For more details about the following options, see:\n# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/train_builder.py`\nloraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.\nloraplus_lr_embedding: # loraplus learning rate for lora embedding layers. 
Default value is 1e-6.\n\npeft:\n # Configuration options for loftq initialization for LoRA\n # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization\n loftq_config:\n loftq_bits: # typically 4 bits\n\n# ReLoRA configuration\n# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed\nrelora_steps: # Number of steps per ReLoRA restart\nrelora_warmup_steps: # Number of per-restart warmup steps\nrelora_anneal_steps: # Number of anneal steps for each relora cycle\nrelora_prune_ratio: # threshold for optimizer magnitude when pruning\nrelora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings\n\n# wandb configuration if you're using it\n# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`.\nwandb_mode: # \"offline\" to save run metadata locally and not sync to the server, \"disabled\" to turn off wandb\nwandb_project: # Your wandb project name\nwandb_entity: # A wandb Team name if using a Team\nwandb_watch:\nwandb_name: # Set the name of your wandb run\nwandb_run_id: # Set the ID of your wandb run\nwandb_log_model: # \"checkpoint\" to log model to wandb Artifacts every `save_steps` or \"end\" to log only at the end of training\n\n# mlflow configuration if you're using it\nmlflow_tracking_uri: # URI to mlflow\nmlflow_experiment_name: # Your experiment name\nmlflow_run_name: # Your run name\nhf_mlflow_log_artifacts: # set to true to copy each saved checkpoint on each save to mlflow artifact registry\n\n# Comet configuration if you're using it\n# Make sure your `COMET_API_KEY` environment variable is set (recommended) or you login to Comet with `comet login`.\n# Check out our documentation for more details https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start\nuse_comet: # Enable or disable Comet integration.\ncomet_api_key: # API key for Comet. 
Recommended to set via `comet login`.\ncomet_workspace: # Workspace name in Comet. Defaults to the user's default workspace.\ncomet_project_name: # Project name in Comet. Defaults to Uncategorized.\ncomet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key.\ncomet_mode: # Create a new experiment (\"create\") or log to an existing one (\"get\"). Default (\"get_or_create\") auto-selects based on configuration.\ncomet_online: # Set to True to log data to Comet server, or False for offline storage. Default is True.\ncomet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details.\n\n# Tensorboard\nuse_tensorboard: # Optional[bool]\n\n# Where to save the full-finetuned model to\noutput_dir: ./completed-model\n\n# Whether to use torch.compile and which backend to use\n# setting to `auto` will enable torch compile when torch>=2.5.1\ntorch_compile: # Optional[Union[Literal[\"auto\"], bool]]\ntorch_compile_backend: # Optional[str]\ntorch_compile_mode: # 'default' | 'reduce-overhead' | 'max-autotune'\n\n# Training hyperparameters\n\n# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps.\ngradient_accumulation_steps: 1\n# The number of samples to include in each batch. This is the number of samples sent to each GPU.\n# Batch size per gpu = micro_batch_size * gradient_accumulation_steps\nmicro_batch_size: 2\neval_batch_size:\nnum_epochs: 4\nwarmup_steps: 100 # cannot use with warmup_ratio\nwarmup_ratio: 0.05 # cannot use with warmup_steps\nlearning_rate: 0.00003\nlr_quadratic_warmup:\nlogging_steps:\neval_steps: # Leave empty to eval at each epoch, integer for every N steps. 
float for fraction of total steps\nevals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps\neval_strategy: # Set to `\"no\"` to skip evaluation, `\"epoch\"` at end of each epoch, leave empty to infer from `eval_steps`.\nsave_strategy: # Set to `\"no\"` to skip checkpoint saves, `\"epoch\"` at end of each epoch, `\"best\"` when better result is achieved, leave empty to infer from `save_steps`.\nsave_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps\nsaves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps\nsave_total_limit: # Checkpoints saved at a time\nsave_only_model: # Save only the model weights, skipping the optimizer. Using this means you can't resume from checkpoints.\n# Maximum number of iterations to train for. It precedes num_epochs which means that\n# if both are set, num_epochs will not be guaranteed.\n# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps\nmax_steps:\n\n# bool of whether to include tokens trainer per second in the training metrics. This iterates over the entire dataset once, so it takes some time.\ninclude_tokens_per_second: # Optional[bool]\n\n# whether to find batch size that fits in memory. Passed to underlying transformers Trainer\nauto_find_batch_size: # Optional[bool]\n\neval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0\neval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128\ndo_causal_lm_eval: # Whether to run causal language model evaluation for metrics in `eval_causal_lm_metrics`.\neval_causal_lm_metrics: # HF evaluate metrics used during evaluation. 
Default is [\"sacrebleu\", \"comet\", \"ter\", \"chrf\", \"perplexity\"]\n\nprofiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir.\n # see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information\n # snapshots can be visualized @ https://pytorch.org/memory_viz\n\nloss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training)\nloss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3)\n\n# Save model as safetensors (require safetensors package). Default True\nsave_safetensors:\n\n# Whether to mask out or include the human's prompt from the training labels\ntrain_on_inputs: false\n# Group similarly sized data to minimize padding.\n# May be slower to start, as it must download and sort the entire dataset.\n# Note that training loss may have an oscillating pattern with this enabled.\ngroup_by_length: false\n\n# Whether to use gradient checkpointing. 
Available options are: true, false, \"offload\", \"offload_disk\".\n# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\ngradient_checkpointing: false\n# additional kwargs to pass to the trainer for gradient checkpointing\n# gradient_checkpointing_kwargs:\n# use_reentrant: true\n\n# Stop training after this many evaluation losses have increased in a row\n# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback\nearly_stopping_patience: 3\n\n# Specify a scheduler and kwargs to use with the optimizer\n# Valid values are driven by the Transformers SchedulerType class, see:\n# https://github.com/huggingface/transformers/blob/5f4ecf2d9f867a1255131d2461d75793c0cf1db2/src/transformers/trainer_utils.py#L420\n# Valid values include\n# - 'linear'\n# - 'cosine' (default)\n# - 'cosine_with_restarts'\n# - 'polynomial'\n# - 'constant'\n# - 'constant_with_warmup'\n# - 'inverse_sqrt'\n# - 'reduce_lr_on_plateau'\n# - 'cosine_with_min_lr'\n# - 'warmup_stable_decay'\n\n# Additional schedulers include:\n# - 'one_cycle'\n# - 'rex'\nlr_scheduler:\nlr_scheduler_kwargs:\ncosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr\ncosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)\n\n# For one_cycle optim\nlr_div_factor: # Learning rate div factor\n\n# Specify optimizer\n# Valid values are driven by the Transformers OptimizerNames class, see:\n# https://github.com/huggingface/transformers/blob/cbf924b76c03828101a34069a96d209314114fd5/src/transformers/training_args.py#L144-L189\n#\n# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of\n# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. 
When in doubt, it is recommended to start with the optimizer used\n# in the examples/ for your model and fine-tuning use case.\n#\n# Valid values for 'optimizer' include:\n# - adamw_torch\n# - adamw_torch_fused (default)\n# - adamw_torch_xla\n# - adamw_torch_npu_fused\n# - adamw_apex_fused\n# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1)\n# - adafactor\n# - adamw_anyprecision\n# - adamw_torch_4bit\n# - ademamix\n# - sgd\n# - adagrad\n# - adamw_bnb_8bit\n# - adamw_8bit # alias for adamw_bnb_8bit\n# - ademamix_8bit\n# - lion_8bit\n# - lion_32bit\n# - paged_adamw_32bit\n# - paged_adamw_8bit\n# - paged_ademamix_32bit\n# - paged_ademamix_8bit\n# - paged_lion_32bit\n# - paged_lion_8bit\n# - rmsprop\n# - rmsprop_bnb\n# - rmsprop_bnb_8bit\n# - rmsprop_bnb_32bit\n# - galore_adamw\n# - galore_adamw_8bit\n# - galore_adafactor\n# - galore_adamw_layerwise\n# - galore_adamw_8bit_layerwise\n# - galore_adafactor_layerwise\n# - lomo\n# - adalomo\n# - grokadamw\n# - schedule_free_adamw\n# - schedule_free_sgd\n# - apollo_adamw\n# - apollo_adamw_layerwise\n#\n# Additional custom optimizers include:\n# - optimi_adamw\n# - ao_adamw_8bit\n# - ao_adamw_fp8\n# - came_pytorch\noptimizer:\n# Dictionary of arguments to pass to the optimizer\noptim_args:\n# For Galore Optimizers the following optim_args are available\n# rank: # type: int\n# update_proj_gap # type: int\n# scale # type: float\n# proj_type: # type: str, default = std\n\n# The target modules to optimize, i.e. 
the module names that you would like to train, right now this is used only for GaLore algorithm\noptim_target_modules:\n# - self_attn # for llama\n# - mlp\n\n# Specify weight decay\nweight_decay:\n# adamw hyperparams\nadam_beta1:\nadam_beta2:\nadam_beta3: # only used for CAME Optimizer\nadam_epsilon:\nadam_epsilon2: # only used for CAME Optimizer\n# Gradient clipping max norm\nmax_grad_norm:\n\n# Augmentation techniques\n# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings\n# currently only supported on Llama and Mistral\nneftune_noise_alpha:\n\n# Optional[bool]. Whether to bettertransformers\nflash_optimum:\n\n# Note: Only one of the following attention patches can be used at a time.\n# For example, if you set `xformers_attention` to `true`, do not set `flash_attention` to `true`.\n\n# Optional[bool]. Whether to use xformers attention patch https://github.com/facebookresearch/xformers:\nxformers_attention:\n# Optional[bool]. Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:\nflash_attention:\nflash_attn_cross_entropy: # Optional[bool]. Whether to use flash-attention cross entropy implementation - advanced use only\nflash_attn_rms_norm: # Optional[bool]. Whether to use flash-attention rms norm implementation - advanced use only\nflash_attn_fuse_qkv: # Optional[bool]. Whether to fuse QKV into a single operation\nflash_attn_fuse_mlp: # Optional[bool]. Whether to fuse part of the MLP into a single operation\n# Optional[bool]. Whether to use scaled-dot-product attention\n# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html\nsdp_attention:\n# Optional[bool]. Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf\ns2_attention:\n\n# Optional[bool]. Whether to use low_cpu_mem_usage\nlow_cpu_mem_usage:\n# Optional[str]. Resume from a specific checkpoint dir\nresume_from_checkpoint:\n# Optional[bool]. 
If resume_from_checkpoint isn't set and you simply want it to start where it left off.\n# Be careful with this being turned on between different models.\nauto_resume_from_checkpoints: false\n\n## Multimodal section\n# int | tuple[int, int] | None . Size to resize images to, width x height.\n# Will read from model/processor config if not set.\nimage_size:\n# str. Algorithm to use for image resizing. \"bilinear\", \"bicubic\", \"lanczos\". Default is \"bilinear\".\nimage_resize_algorithm: 'bilinear'\n## End of multimodal section\n\n# Don't mess with this, it's here for accelerate and torchrun\nlocal_rank:\n\n# Add or change special tokens.\n# If you add tokens here, you don't need to add them to the `tokens` list.\nspecial_tokens:\n # bos_token: \"<s>\"\n # eos_token: \"</s>\"\n # unk_token: \"<unk>\"\n # pad_token: \"[PAD]\"\n\n# Optional[list[str]]. Add extra tokens to the tokenizer.\ntokens:\n # - \"<|startoftext|>\"\n # - \"<|endoftext|>\"\n\n# Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer.\n# Only works for tokens that are not part of the base vocab (aka are added_tokens).\n# Can be checked if they exist in tokenizer.json added_tokens.\nadded_tokens_overrides: # Dict[int, str]\n# 128041: \"<|im_start|>\"\n# 128042: \"<|im_end|>\"\n\n# FSDP\nfsdp:\nfsdp_config:\n\n# Deepspeed config path. 
e.g., deepspeed_configs/zero3.json\ndeepspeed:\n\n# Advanced DDP Arguments\nddp_timeout:\nddp_bucket_cap_mb:\nddp_broadcast_buffers:\n\n# Sequence parallelism\n# Set to a divisor of the number of GPUs available to split sequences into chunks of equal size.\n# Use in long context training to prevent OOM when sequences cannot fit into a single GPU's VRAM.\n# E.g., if 4 GPUs are available, set this value to 2 to split each sequence into two equal-sized\n# subsequences, or set to 4 to split into four equal-sized subsequences.\n# See https://docs.axolotl.ai/docs/sequence_parallelism.html for more details.\nsequence_parallel_degree:\n# Optional; strides across the key dimension. Larger values use more memory but should make training faster.\n# Must evenly divide the number of KV heads in your model.\nheads_k_stride: 1\n# One of \"varlen_llama3\", \"batch_ring\", \"batch_zigzag\", \"batch_stripe\". Defaults to \"varlen_llama3\"\n# in the sample packing case, and \"batch_ring\" in the non-sample packing case.\nring_attn_func:\n\n# Path to torch distx for optim 'adamw_anyprecision'\ntorchdistx_path:\n\n# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize\npretraining_dataset:\n\n# Debug mode\ndebug:\n\n# Seed\nseed:\n\n# Allow overwrite yml config using from cli\nstrict:", "crumbs": [ "Getting Started", "Config Reference" diff --git a/sitemap.xml b/sitemap.xml index 4b10f4d6f..f5989912c 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -2,754 +2,762 @@ https://docs.axolotl.ai/TODO.html - 2025-05-28T20:20:30.191Z + 2025-05-30T04:21:58.270Z https://docs.axolotl.ai/docs/debugging.html - 2025-05-28T20:20:30.194Z + 2025-05-30T04:21:58.272Z https://docs.axolotl.ai/docs/rlhf.html - 2025-05-28T20:20:30.197Z + 2025-05-30T04:21:58.275Z https://docs.axolotl.ai/docs/input_output.html - 2025-05-28T20:20:30.196Z + 2025-05-30T04:21:58.275Z https://docs.axolotl.ai/docs/lr_groups.html - 2025-05-28T20:20:30.197Z + 2025-05-30T04:21:58.275Z 
https://docs.axolotl.ai/docs/dataset-formats/template_free.html - 2025-05-28T20:20:30.193Z + 2025-05-30T04:21:58.272Z https://docs.axolotl.ai/docs/dataset-formats/pretraining.html - 2025-05-28T20:20:30.193Z + 2025-05-30T04:21:58.271Z https://docs.axolotl.ai/docs/dataset-formats/conversation.html - 2025-05-28T20:20:30.193Z + 2025-05-30T04:21:58.271Z https://docs.axolotl.ai/docs/dataset-formats/inst_tune.html - 2025-05-28T20:20:30.193Z + 2025-05-30T04:21:58.271Z https://docs.axolotl.ai/docs/fsdp_qlora.html - 2025-05-28T20:20:30.194Z + 2025-05-30T04:21:58.272Z https://docs.axolotl.ai/docs/torchao.html - 2025-05-28T20:20:30.197Z + 2025-05-30T04:21:58.275Z https://docs.axolotl.ai/docs/dataset_preprocessing.html - 2025-05-28T20:20:30.194Z + 2025-05-30T04:21:58.272Z https://docs.axolotl.ai/docs/batch_vs_grad.html - 2025-05-28T20:20:30.193Z + 2025-05-30T04:21:58.271Z https://docs.axolotl.ai/docs/nccl.html - 2025-05-28T20:20:30.197Z + 2025-05-30T04:21:58.275Z https://docs.axolotl.ai/docs/lora_optims.html - 2025-05-28T20:20:30.196Z + 2025-05-30T04:21:58.275Z https://docs.axolotl.ai/docs/multipack.html - 2025-05-28T20:20:30.197Z + 2025-05-30T04:21:58.275Z https://docs.axolotl.ai/docs/faq.html - 2025-05-28T20:20:30.194Z + 2025-05-30T04:21:58.272Z https://docs.axolotl.ai/docs/quantize.html - 2025-05-28T20:20:30.197Z + 2025-05-30T04:21:58.275Z https://docs.axolotl.ai/docs/sequence_parallelism.html - 2025-05-28T20:20:30.197Z + 2025-05-30T04:21:58.275Z https://docs.axolotl.ai/docs/multi-gpu.html - 2025-05-28T20:20:30.197Z + 2025-05-30T04:21:58.275Z https://docs.axolotl.ai/docs/api/utils.collators.core.html - 2025-05-28T20:21:01.349Z + 2025-05-30T04:22:38.632Z https://docs.axolotl.ai/docs/api/utils.collators.mm_chat.html - 2025-05-28T20:21:01.377Z + 2025-05-30T04:22:38.659Z https://docs.axolotl.ai/docs/api/prompt_strategies.pygmalion.html - 2025-05-28T20:21:00.653Z + 2025-05-30T04:22:37.924Z https://docs.axolotl.ai/docs/api/cli.cloud.base.html - 2025-05-28T20:21:00.397Z + 
2025-05-30T04:22:37.671Z https://docs.axolotl.ai/docs/api/common.architectures.html - 2025-05-28T20:21:01.329Z + 2025-05-30T04:22:38.612Z https://docs.axolotl.ai/docs/api/utils.optimizers.adopt.html - 2025-05-28T20:21:01.085Z + 2025-05-30T04:22:38.362Z https://docs.axolotl.ai/docs/api/utils.callbacks.lisa.html - 2025-05-28T20:21:01.428Z + 2025-05-30T04:22:38.710Z https://docs.axolotl.ai/docs/api/monkeypatch.gradient_checkpointing.offload_cpu.html - 2025-05-28T20:21:00.951Z + 2025-05-30T04:22:38.227Z https://docs.axolotl.ai/docs/api/integrations.lm_eval.args.html - 2025-05-28T20:21:01.324Z + 2025-05-30T04:22:38.607Z https://docs.axolotl.ai/docs/api/utils.model_shard_quant.html - 2025-05-28T20:21:01.003Z + 2025-05-30T04:22:38.281Z https://docs.axolotl.ai/docs/api/utils.schemas.utils.html - 2025-05-28T20:21:01.189Z + 2025-05-30T04:22:38.470Z https://docs.axolotl.ai/docs/api/core.trainers.trl.html - 2025-05-28T20:21:00.439Z + 2025-05-30T04:22:37.711Z https://docs.axolotl.ai/docs/api/utils.collators.batching.html - 2025-05-28T20:21:01.368Z + 2025-05-30T04:22:38.651Z https://docs.axolotl.ai/docs/api/prompt_strategies.alpaca_w_system.html - 2025-05-28T20:21:00.599Z + 2025-05-30T04:22:37.870Z https://docs.axolotl.ai/docs/api/integrations.base.html - 2025-05-28T20:21:01.306Z + 2025-05-30T04:22:38.588Z https://docs.axolotl.ai/docs/api/core.chat.messages.html - 2025-05-28T20:21:00.203Z + 2025-05-30T04:22:37.476Z https://docs.axolotl.ai/docs/api/kernels.lora.html - 2025-05-28T20:21:00.806Z + 2025-05-30T04:22:38.078Z https://docs.axolotl.ai/docs/api/utils.callbacks.perplexity.html - 2025-05-28T20:21:01.422Z + 2025-05-30T04:22:38.705Z https://docs.axolotl.ai/docs/api/utils.schemas.training.html - 2025-05-28T20:21:01.128Z + 2025-05-30T04:22:38.408Z https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.user_defined.html - 2025-05-28T20:21:00.682Z + 2025-05-30T04:22:37.953Z https://docs.axolotl.ai/docs/api/utils.samplers.multipack.html - 2025-05-28T20:21:01.416Z + 
2025-05-30T04:22:38.699Z https://docs.axolotl.ai/docs/api/utils.collators.mamba.html - 2025-05-28T20:21:01.372Z + 2025-05-30T04:22:38.654Z https://docs.axolotl.ai/docs/api/cli.merge_sharded_fsdp_weights.html - 2025-05-28T20:21:00.344Z + 2025-05-30T04:22:37.618Z - https://docs.axolotl.ai/docs/api/utils.callbacks.profiler.html - 2025-05-28T20:21:01.426Z - - - https://docs.axolotl.ai/docs/api/loaders.tokenizer.html - 2025-05-28T20:21:00.494Z - - - https://docs.axolotl.ai/docs/api/core.datasets.transforms.chat_builder.html - 2025-05-28T20:21:00.220Z - - - https://docs.axolotl.ai/docs/api/cli.vllm_serve.html - 2025-05-28T20:21:00.394Z - - - https://docs.axolotl.ai/docs/api/cli.config.html - 2025-05-28T20:21:00.310Z - - - https://docs.axolotl.ai/docs/api/utils.schemas.trl.html - 2025-05-28T20:21:01.157Z - - - https://docs.axolotl.ai/docs/api/core.trainers.mamba.html - 2025-05-28T20:21:00.444Z - - - https://docs.axolotl.ai/docs/api/integrations.grokfast.optimizer.html - 2025-05-28T20:21:01.310Z - - - https://docs.axolotl.ai/docs/api/prompt_strategies.chat_template.html - 2025-05-28T20:21:00.572Z - - - https://docs.axolotl.ai/docs/api/utils.schemas.multimodal.html - 2025-05-28T20:21:01.162Z - - - https://docs.axolotl.ai/docs/api/monkeypatch.utils.html - 2025-05-28T20:21:00.919Z - - - https://docs.axolotl.ai/docs/api/prompt_strategies.kto.user_defined.html - 2025-05-28T20:21:00.701Z - - - https://docs.axolotl.ai/docs/api/core.trainers.mixins.rng_state_loader.html - 2025-05-28T20:21:00.522Z - - - https://docs.axolotl.ai/docs/api/loaders.adapter.html - 2025-05-28T20:21:00.501Z - - - https://docs.axolotl.ai/docs/api/integrations.liger.args.html - 2025-05-28T20:21:01.321Z - - - https://docs.axolotl.ai/docs/api/utils.data.sft.html - 2025-05-28T20:21:01.088Z - - - https://docs.axolotl.ai/docs/api/utils.freeze.html - 2025-05-28T20:21:01.014Z - - - https://docs.axolotl.ai/docs/api/cli.preprocess.html - 2025-05-28T20:21:00.352Z - - - https://docs.axolotl.ai/docs/api/index.html - 
2025-05-28T20:20:59.940Z - - - https://docs.axolotl.ai/docs/api/cli.main.html - 2025-05-28T20:21:00.251Z - - - https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.chatml.html - 2025-05-28T20:21:00.679Z - - - https://docs.axolotl.ai/docs/api/core.trainer_builder.html - 2025-05-28T20:21:00.093Z - - - https://docs.axolotl.ai/docs/api/train.html - 2025-05-28T20:21:00.000Z - - - https://docs.axolotl.ai/docs/api/utils.callbacks.qat.html - 2025-05-28T20:21:01.442Z - - - https://docs.axolotl.ai/docs/api/monkeypatch.stablelm_attn_hijack_flash.html - 2025-05-28T20:21:00.927Z - - - https://docs.axolotl.ai/docs/api/utils.schemas.enums.html - 2025-05-28T20:21:01.184Z - - - https://docs.axolotl.ai/docs/api/loaders.constants.html - 2025-05-28T20:21:00.511Z - - - https://docs.axolotl.ai/docs/api/prompt_strategies.metharme.html - 2025-05-28T20:21:00.643Z - - - https://docs.axolotl.ai/docs/api/kernels.swiglu.html - 2025-05-28T20:21:00.826Z - - - https://docs.axolotl.ai/docs/api/core.trainers.base.html - 2025-05-28T20:21:00.422Z - - - https://docs.axolotl.ai/docs/api/monkeypatch.relora.html - 2025-05-28T20:21:00.885Z - - - https://docs.axolotl.ai/docs/api/cli.cloud.modal_.html - 2025-05-28T20:21:00.403Z - - - https://docs.axolotl.ai/docs/api/core.training_args.html - 2025-05-28T20:21:00.180Z - - - https://docs.axolotl.ai/docs/api/utils.callbacks.comet_.html - 2025-05-28T20:21:01.435Z - - - https://docs.axolotl.ai/docs/api/core.chat.format.shared.html - 2025-05-28T20:21:00.207Z - - - https://docs.axolotl.ai/docs/api/monkeypatch.lora_kernels.html - 2025-05-28T20:21:00.911Z - - - https://docs.axolotl.ai/docs/api/cli.merge_lora.html - 2025-05-28T20:21:00.332Z - - - https://docs.axolotl.ai/docs/api/utils.trainer.html - 2025-05-28T20:21:01.031Z - - - https://docs.axolotl.ai/docs/api/utils.dict.html - 2025-05-28T20:21:01.077Z - - - https://docs.axolotl.ai/docs/api/kernels.quantize.html - 2025-05-28T20:21:00.833Z - - - https://docs.axolotl.ai/docs/api/core.trainers.utils.html - 
2025-05-28T20:21:00.477Z - - - https://docs.axolotl.ai/docs/api/monkeypatch.data.batch_dataset_fetcher.html - 2025-05-28T20:21:00.946Z - - - https://docs.axolotl.ai/docs/api/utils.ctx_managers.sequence_parallel.html - 2025-05-28T20:21:00.553Z - - - https://docs.axolotl.ai/docs/api/core.trainers.grpo.sampler.html - 2025-05-28T20:21:00.475Z - - - https://docs.axolotl.ai/docs/api/prompt_strategies.user_defined.html - 2025-05-28T20:21:00.607Z - - - https://docs.axolotl.ai/docs/api/monkeypatch.llama_attn_hijack_flash.html - 2025-05-28T20:21:00.861Z - - - https://docs.axolotl.ai/docs/api/cli.train.html - 2025-05-28T20:21:00.259Z - - - https://docs.axolotl.ai/docs/api/cli.quantize.html - 2025-05-28T20:21:00.408Z - - - https://docs.axolotl.ai/docs/api/cli.utils.html - 2025-05-28T20:21:00.389Z - - - https://docs.axolotl.ai/src/axolotl/integrations/LICENSE.html - 2025-05-28T20:20:30.213Z - - - https://docs.axolotl.ai/FAQS.html - 2025-05-28T20:20:30.191Z - - - https://docs.axolotl.ai/examples/colab-notebooks/colab-axolotl-example.html - 2025-05-28T20:20:30.198Z - - - https://docs.axolotl.ai/index.html - 2025-05-28T20:20:30.210Z - - - https://docs.axolotl.ai/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html - 2025-05-28T20:20:30.214Z - - - https://docs.axolotl.ai/docs/api/datasets.html - 2025-05-28T20:21:00.018Z - - - https://docs.axolotl.ai/docs/api/core.trainers.mixins.optimizer.html - 2025-05-28T20:21:00.516Z - - - https://docs.axolotl.ai/docs/api/prompt_strategies.orpo.chat_template.html - 2025-05-28T20:21:00.721Z - - - https://docs.axolotl.ai/docs/api/prompt_strategies.completion.html - 2025-05-28T20:21:00.625Z - - - https://docs.axolotl.ai/docs/api/evaluate.html - 2025-05-28T20:21:00.011Z - - - https://docs.axolotl.ai/docs/api/loaders.processor.html - 2025-05-28T20:21:00.496Z - - - https://docs.axolotl.ai/docs/api/common.const.html - 2025-05-28T20:21:01.330Z - - - https://docs.axolotl.ai/docs/api/prompt_strategies.bradley_terry.llama3.html - 
2025-05-28T20:21:00.725Z - - - https://docs.axolotl.ai/docs/api/utils.distributed.html - 2025-05-28T20:21:01.074Z - - - https://docs.axolotl.ai/docs/api/utils.tokenization.html - 2025-05-28T20:21:00.983Z - - - https://docs.axolotl.ai/docs/api/utils.schemas.integrations.html - 2025-05-28T20:21:01.173Z - - - https://docs.axolotl.ai/docs/api/utils.schedulers.html - 2025-05-28T20:21:01.055Z - - - https://docs.axolotl.ai/docs/api/core.trainers.mixins.scheduler.html - 2025-05-28T20:21:00.530Z - - - https://docs.axolotl.ai/docs/api/core.datasets.chat.html - 2025-05-28T20:21:00.212Z - - - https://docs.axolotl.ai/docs/api/utils.callbacks.mlflow_.html - 2025-05-28T20:21:01.431Z - - - https://docs.axolotl.ai/docs/api/monkeypatch.mistral_attn_hijack_flash.html - 2025-05-28T20:21:00.876Z - - - https://docs.axolotl.ai/docs/api/core.chat.format.llama3x.html - 2025-05-28T20:21:00.206Z - - - https://docs.axolotl.ai/docs/api/cli.checks.html - 2025-05-28T20:21:00.293Z - - - https://docs.axolotl.ai/docs/api/monkeypatch.transformers_fa_utils.html - 2025-05-28T20:21:00.937Z - - - https://docs.axolotl.ai/docs/api/prompt_strategies.llama2_chat.html - 2025-05-28T20:21:00.620Z - - - https://docs.axolotl.ai/docs/api/convert.html - 2025-05-28T20:21:00.031Z - - - https://docs.axolotl.ai/docs/api/utils.schemas.datasets.html - 2025-05-28T20:21:01.145Z - - - https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.llama3.html - 2025-05-28T20:21:00.669Z - - - https://docs.axolotl.ai/docs/api/cli.args.html - 2025-05-28T20:21:00.287Z - - - https://docs.axolotl.ai/docs/api/monkeypatch.llama_attn_hijack_xformers.html - 2025-05-28T20:21:00.862Z - - - https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.chat_template.html - 2025-05-28T20:21:00.659Z - - - https://docs.axolotl.ai/docs/api/models.mamba.modeling_mamba.html - 2025-05-28T20:21:01.348Z - - - https://docs.axolotl.ai/docs/api/prompt_strategies.messages.chat.html - 2025-05-28T20:21:00.657Z - - - 
https://docs.axolotl.ai/docs/api/monkeypatch.llama_patch_multipack.html - 2025-05-28T20:21:00.922Z - - - https://docs.axolotl.ai/docs/api/loaders.patch_manager.html - 2025-05-28T20:21:00.509Z - - - https://docs.axolotl.ai/docs/api/utils.schemas.peft.html - 2025-05-28T20:21:01.153Z - - - https://docs.axolotl.ai/docs/api/monkeypatch.llama_expand_mask.html - 2025-05-28T20:21:00.886Z - - - https://docs.axolotl.ai/docs/api/core.trainers.grpo.trainer.html - 2025-05-28T20:21:00.463Z - - - https://docs.axolotl.ai/docs/api/integrations.spectrum.args.html - 2025-05-28T20:21:01.327Z - - - https://docs.axolotl.ai/docs/api/core.trainers.relora.html - 2025-05-28T20:21:00.448Z - - - https://docs.axolotl.ai/docs/api/loaders.model.html - 2025-05-28T20:21:00.486Z - - - https://docs.axolotl.ai/docs/api/utils.bench.html - 2025-05-28T20:21:01.006Z - - - https://docs.axolotl.ai/docs/api/prompt_strategies.input_output.html - 2025-05-28T20:21:00.631Z - - - https://docs.axolotl.ai/docs/api/cli.inference.html - 2025-05-28T20:21:00.324Z - - - https://docs.axolotl.ai/docs/api/core.trainers.dpo.trainer.html - 2025-05-28T20:21:00.454Z - - - https://docs.axolotl.ai/docs/api/monkeypatch.gradient_checkpointing.offload_disk.html - 2025-05-28T20:21:00.976Z - - - https://docs.axolotl.ai/docs/api/utils.lora.html - 2025-05-28T20:21:00.998Z - - - https://docs.axolotl.ai/docs/api/monkeypatch.btlm_attn_hijack_flash.html - 2025-05-28T20:21:00.920Z - - - https://docs.axolotl.ai/docs/api/utils.chat_templates.html - 2025-05-28T20:21:00.993Z - - - https://docs.axolotl.ai/docs/api/integrations.cut_cross_entropy.args.html - 2025-05-28T20:21:01.309Z - - - https://docs.axolotl.ai/docs/api/monkeypatch.trainer_fsdp_optim.html - 2025-05-28T20:21:00.931Z - - - https://docs.axolotl.ai/docs/api/prompt_strategies.stepwise_supervised.html - 2025-05-28T20:21:00.636Z + https://docs.axolotl.ai/docs/api/core.builders.base.html + 2025-05-30T04:22:37.356Z https://docs.axolotl.ai/docs/api/common.datasets.html - 
2025-05-28T20:21:01.347Z + 2025-05-30T04:22:38.630Z + + + https://docs.axolotl.ai/docs/api/prompt_strategies.stepwise_supervised.html + 2025-05-30T04:22:37.907Z + + + https://docs.axolotl.ai/docs/api/core.datasets.transforms.chat_builder.html + 2025-05-30T04:22:37.493Z + + + https://docs.axolotl.ai/docs/api/cli.vllm_serve.html + 2025-05-30T04:22:37.667Z + + + https://docs.axolotl.ai/docs/api/cli.config.html + 2025-05-30T04:22:37.584Z + + + https://docs.axolotl.ai/docs/api/utils.schemas.trl.html + 2025-05-30T04:22:38.437Z + + + https://docs.axolotl.ai/docs/api/core.trainers.mamba.html + 2025-05-30T04:22:37.717Z + + + https://docs.axolotl.ai/docs/api/integrations.grokfast.optimizer.html + 2025-05-30T04:22:38.593Z + + + https://docs.axolotl.ai/docs/api/prompt_strategies.chat_template.html + 2025-05-30T04:22:37.843Z + + + https://docs.axolotl.ai/docs/api/utils.schemas.multimodal.html + 2025-05-30T04:22:38.442Z + + + https://docs.axolotl.ai/docs/api/monkeypatch.utils.html + 2025-05-30T04:22:38.195Z + + + https://docs.axolotl.ai/docs/api/prompt_strategies.kto.user_defined.html + 2025-05-30T04:22:37.972Z + + + https://docs.axolotl.ai/docs/api/core.trainers.mixins.rng_state_loader.html + 2025-05-30T04:22:37.794Z + + + https://docs.axolotl.ai/docs/api/loaders.adapter.html + 2025-05-30T04:22:37.776Z + + + https://docs.axolotl.ai/docs/api/integrations.liger.args.html + 2025-05-30T04:22:38.604Z + + + https://docs.axolotl.ai/docs/api/utils.data.sft.html + 2025-05-30T04:22:38.365Z + + + https://docs.axolotl.ai/docs/api/utils.freeze.html + 2025-05-30T04:22:38.292Z + + + https://docs.axolotl.ai/docs/api/cli.preprocess.html + 2025-05-30T04:22:37.626Z + + + https://docs.axolotl.ai/docs/api/index.html + 2025-05-30T04:22:37.214Z + + + https://docs.axolotl.ai/docs/api/cli.main.html + 2025-05-30T04:22:37.524Z + + + https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.chatml.html + 2025-05-30T04:22:37.950Z + + + https://docs.axolotl.ai/docs/api/models.mamba.modeling_mamba.html + 
2025-05-30T04:22:38.631Z + + + https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.chat_template.html + 2025-05-30T04:22:37.930Z + + + https://docs.axolotl.ai/docs/api/monkeypatch.llama_attn_hijack_xformers.html + 2025-05-30T04:22:38.135Z + + + https://docs.axolotl.ai/docs/api/cli.args.html + 2025-05-30T04:22:37.560Z + + + https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.llama3.html + 2025-05-30T04:22:37.940Z + + + https://docs.axolotl.ai/docs/api/utils.schemas.datasets.html + 2025-05-30T04:22:38.425Z + + + https://docs.axolotl.ai/docs/api/convert.html + 2025-05-30T04:22:37.304Z + + + https://docs.axolotl.ai/docs/api/prompt_strategies.llama2_chat.html + 2025-05-30T04:22:37.890Z + + + https://docs.axolotl.ai/docs/api/monkeypatch.transformers_fa_utils.html + 2025-05-30T04:22:38.213Z + + + https://docs.axolotl.ai/docs/api/cli.checks.html + 2025-05-30T04:22:37.566Z + + + https://docs.axolotl.ai/docs/api/core.chat.format.llama3x.html + 2025-05-30T04:22:37.479Z + + + https://docs.axolotl.ai/docs/api/monkeypatch.mistral_attn_hijack_flash.html + 2025-05-30T04:22:38.150Z + + + https://docs.axolotl.ai/docs/api/utils.callbacks.mlflow_.html + 2025-05-30T04:22:38.713Z + + + https://docs.axolotl.ai/docs/api/core.datasets.chat.html + 2025-05-30T04:22:37.486Z + + + https://docs.axolotl.ai/docs/api/core.trainers.mixins.scheduler.html + 2025-05-30T04:22:37.800Z + + + https://docs.axolotl.ai/docs/api/utils.schedulers.html + 2025-05-30T04:22:38.332Z + + + https://docs.axolotl.ai/docs/api/utils.schemas.integrations.html + 2025-05-30T04:22:38.454Z + + + https://docs.axolotl.ai/docs/api/utils.tokenization.html + 2025-05-30T04:22:38.260Z + + + https://docs.axolotl.ai/docs/api/utils.distributed.html + 2025-05-30T04:22:38.352Z + + + https://docs.axolotl.ai/docs/api/prompt_strategies.bradley_terry.llama3.html + 2025-05-30T04:22:37.996Z + + + https://docs.axolotl.ai/docs/api/common.const.html + 2025-05-30T04:22:38.613Z + + + https://docs.axolotl.ai/docs/api/loaders.processor.html + 
2025-05-30T04:22:37.770Z + + + https://docs.axolotl.ai/docs/api/evaluate.html + 2025-05-30T04:22:37.284Z + + + https://docs.axolotl.ai/docs/api/prompt_strategies.completion.html + 2025-05-30T04:22:37.897Z + + + https://docs.axolotl.ai/docs/api/prompt_strategies.orpo.chat_template.html + 2025-05-30T04:22:37.992Z + + + https://docs.axolotl.ai/docs/api/core.trainers.mixins.optimizer.html + 2025-05-30T04:22:37.791Z + + + https://docs.axolotl.ai/docs/api/datasets.html + 2025-05-30T04:22:37.291Z + + + https://docs.axolotl.ai/docs/api/cli.utils.html + 2025-05-30T04:22:37.663Z + + + https://docs.axolotl.ai/src/axolotl/integrations/LICENSE.html + 2025-05-30T04:21:58.292Z + + + https://docs.axolotl.ai/FAQS.html + 2025-05-30T04:21:58.270Z + + + https://docs.axolotl.ai/examples/colab-notebooks/colab-axolotl-example.html + 2025-05-30T04:21:58.276Z + + + https://docs.axolotl.ai/index.html + 2025-05-30T04:21:58.288Z + + + https://docs.axolotl.ai/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html + 2025-05-30T04:21:58.292Z + + + https://docs.axolotl.ai/docs/api/core.builders.causal.html + 2025-05-30T04:22:37.360Z + + + https://docs.axolotl.ai/docs/api/cli.quantize.html + 2025-05-30T04:22:37.682Z + + + https://docs.axolotl.ai/docs/api/cli.train.html + 2025-05-30T04:22:37.532Z + + + https://docs.axolotl.ai/docs/api/monkeypatch.llama_attn_hijack_flash.html + 2025-05-30T04:22:38.133Z + + + https://docs.axolotl.ai/docs/api/prompt_strategies.user_defined.html + 2025-05-30T04:22:37.878Z + + + https://docs.axolotl.ai/docs/api/core.trainers.grpo.sampler.html + 2025-05-30T04:22:37.750Z + + + https://docs.axolotl.ai/docs/api/utils.ctx_managers.sequence_parallel.html + 2025-05-30T04:22:37.824Z + + + https://docs.axolotl.ai/docs/api/monkeypatch.data.batch_dataset_fetcher.html + 2025-05-30T04:22:38.223Z + + + https://docs.axolotl.ai/docs/api/core.trainers.utils.html + 2025-05-30T04:22:37.751Z + + + https://docs.axolotl.ai/docs/api/kernels.quantize.html + 2025-05-30T04:22:38.106Z + 
+ + https://docs.axolotl.ai/docs/api/utils.dict.html + 2025-05-30T04:22:38.355Z + + + https://docs.axolotl.ai/docs/api/utils.trainer.html + 2025-05-30T04:22:38.308Z + + + https://docs.axolotl.ai/docs/api/cli.merge_lora.html + 2025-05-30T04:22:37.606Z + + + https://docs.axolotl.ai/docs/api/monkeypatch.lora_kernels.html + 2025-05-30T04:22:38.187Z + + + https://docs.axolotl.ai/docs/api/core.chat.format.shared.html + 2025-05-30T04:22:37.481Z + + + https://docs.axolotl.ai/docs/api/utils.callbacks.comet_.html + 2025-05-30T04:22:38.717Z + + + https://docs.axolotl.ai/docs/api/core.training_args.html + 2025-05-30T04:22:37.454Z + + + https://docs.axolotl.ai/docs/api/cli.cloud.modal_.html + 2025-05-30T04:22:37.677Z + + + https://docs.axolotl.ai/docs/api/monkeypatch.relora.html + 2025-05-30T04:22:38.158Z + + + https://docs.axolotl.ai/docs/api/core.trainers.base.html + 2025-05-30T04:22:37.695Z + + + https://docs.axolotl.ai/docs/api/kernels.swiglu.html + 2025-05-30T04:22:38.099Z + + + https://docs.axolotl.ai/docs/api/prompt_strategies.metharme.html + 2025-05-30T04:22:37.914Z + + + https://docs.axolotl.ai/docs/api/loaders.constants.html + 2025-05-30T04:22:37.785Z + + + https://docs.axolotl.ai/docs/api/utils.schemas.enums.html + 2025-05-30T04:22:38.465Z + + + https://docs.axolotl.ai/docs/api/monkeypatch.stablelm_attn_hijack_flash.html + 2025-05-30T04:22:38.203Z + + + https://docs.axolotl.ai/docs/api/utils.callbacks.qat.html + 2025-05-30T04:22:38.724Z + + + https://docs.axolotl.ai/docs/api/train.html + 2025-05-30T04:22:37.274Z + + + https://docs.axolotl.ai/docs/api/prompt_strategies.messages.chat.html + 2025-05-30T04:22:37.928Z + + + https://docs.axolotl.ai/docs/api/monkeypatch.llama_patch_multipack.html + 2025-05-30T04:22:38.198Z + + + https://docs.axolotl.ai/docs/api/loaders.patch_manager.html + 2025-05-30T04:22:37.784Z + + + https://docs.axolotl.ai/docs/api/utils.schemas.peft.html + 2025-05-30T04:22:38.434Z + + + 
https://docs.axolotl.ai/docs/api/monkeypatch.llama_expand_mask.html + 2025-05-30T04:22:38.160Z + + + https://docs.axolotl.ai/docs/api/core.trainers.grpo.trainer.html + 2025-05-30T04:22:37.738Z + + + https://docs.axolotl.ai/docs/api/integrations.spectrum.args.html + 2025-05-30T04:22:38.610Z + + + https://docs.axolotl.ai/docs/api/core.trainers.relora.html + 2025-05-30T04:22:37.721Z + + + https://docs.axolotl.ai/docs/api/loaders.model.html + 2025-05-30T04:22:37.761Z + + + https://docs.axolotl.ai/docs/api/utils.bench.html + 2025-05-30T04:22:38.284Z + + + https://docs.axolotl.ai/docs/api/prompt_strategies.input_output.html + 2025-05-30T04:22:37.903Z + + + https://docs.axolotl.ai/docs/api/cli.inference.html + 2025-05-30T04:22:37.598Z + + + https://docs.axolotl.ai/docs/api/core.trainers.dpo.trainer.html + 2025-05-30T04:22:37.727Z + + + https://docs.axolotl.ai/docs/api/monkeypatch.gradient_checkpointing.offload_disk.html + 2025-05-30T04:22:38.254Z + + + https://docs.axolotl.ai/docs/api/utils.lora.html + 2025-05-30T04:22:38.275Z + + + https://docs.axolotl.ai/docs/api/monkeypatch.btlm_attn_hijack_flash.html + 2025-05-30T04:22:38.196Z + + + https://docs.axolotl.ai/docs/api/utils.chat_templates.html + 2025-05-30T04:22:38.270Z + + + https://docs.axolotl.ai/docs/api/integrations.cut_cross_entropy.args.html + 2025-05-30T04:22:38.592Z + + + https://docs.axolotl.ai/docs/api/monkeypatch.trainer_fsdp_optim.html + 2025-05-30T04:22:38.207Z + + + https://docs.axolotl.ai/docs/api/core.builders.rl.html + 2025-05-30T04:22:37.368Z + + + https://docs.axolotl.ai/docs/api/loaders.tokenizer.html + 2025-05-30T04:22:37.769Z + + + https://docs.axolotl.ai/docs/api/utils.callbacks.profiler.html + 2025-05-30T04:22:38.708Z https://docs.axolotl.ai/docs/api/prompt_strategies.alpaca_instruct.html - 2025-05-28T20:21:00.587Z + 2025-05-30T04:22:37.858Z https://docs.axolotl.ai/docs/api/core.chat.format.chatml.html - 2025-05-28T20:21:00.204Z + 2025-05-30T04:22:37.478Z 
https://docs.axolotl.ai/docs/api/monkeypatch.attention.mllama.html - 2025-05-28T20:21:00.945Z + 2025-05-30T04:22:38.221Z https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.zephyr.html - 2025-05-28T20:21:00.681Z + 2025-05-30T04:22:37.952Z https://docs.axolotl.ai/docs/api/utils.schemas.config.html - 2025-05-28T20:21:01.116Z + 2025-05-30T04:22:38.396Z https://docs.axolotl.ai/docs/api/monkeypatch.unsloth_.html - 2025-05-28T20:21:00.938Z + 2025-05-30T04:22:38.215Z https://docs.axolotl.ai/docs/api/prompt_strategies.dpo.passthrough.html - 2025-05-28T20:21:00.683Z + 2025-05-30T04:22:37.955Z https://docs.axolotl.ai/docs/api/prompt_strategies.orcamini.html - 2025-05-28T20:21:00.646Z + 2025-05-30T04:22:37.918Z https://docs.axolotl.ai/docs/api/cli.evaluate.html - 2025-05-28T20:21:00.267Z + 2025-05-30T04:22:37.541Z https://docs.axolotl.ai/docs/api/monkeypatch.multipack.html - 2025-05-28T20:21:00.878Z + 2025-05-30T04:22:38.151Z https://docs.axolotl.ai/docs/api/cli.sweeps.html - 2025-05-28T20:21:00.358Z + 2025-05-30T04:22:37.631Z https://docs.axolotl.ai/docs/api/prompt_strategies.kto.chatml.html - 2025-05-28T20:21:00.699Z + 2025-05-30T04:22:37.971Z https://docs.axolotl.ai/docs/api/prompt_strategies.base.html - 2025-05-28T20:21:00.554Z + 2025-05-30T04:22:37.826Z https://docs.axolotl.ai/docs/api/kernels.utils.html - 2025-05-28T20:21:00.835Z + 2025-05-30T04:22:38.108Z https://docs.axolotl.ai/docs/api/utils.schemas.model.html - 2025-05-28T20:21:01.123Z + 2025-05-30T04:22:38.403Z https://docs.axolotl.ai/docs/api/utils.data.pretraining.html - 2025-05-28T20:21:01.086Z + 2025-05-30T04:22:38.364Z https://docs.axolotl.ai/docs/api/monkeypatch.mixtral.html - 2025-05-28T20:21:00.948Z + 2025-05-30T04:22:38.224Z https://docs.axolotl.ai/docs/api/logging_config.html - 2025-05-28T20:21:00.078Z + 2025-05-30T04:22:37.349Z https://docs.axolotl.ai/docs/api/prompt_strategies.alpaca_chat.html - 2025-05-28T20:21:00.586Z + 2025-05-30T04:22:37.857Z 
https://docs.axolotl.ai/docs/api/prompt_strategies.kto.llama3.html - 2025-05-28T20:21:00.691Z + 2025-05-30T04:22:37.963Z https://docs.axolotl.ai/docs/api/integrations.kd.trainer.html - 2025-05-28T20:21:01.318Z + 2025-05-30T04:22:38.600Z https://docs.axolotl.ai/docs/api/utils.quantization.html - 2025-05-28T20:21:01.108Z + 2025-05-30T04:22:38.385Z https://docs.axolotl.ai/docs/api/prompt_tokenizers.html - 2025-05-28T20:21:00.073Z + 2025-05-30T04:22:37.344Z https://docs.axolotl.ai/docs/api/kernels.geglu.html - 2025-05-28T20:21:00.816Z + 2025-05-30T04:22:38.089Z https://docs.axolotl.ai/docs/custom_integrations.html - 2025-05-28T20:20:30.193Z + 2025-05-30T04:21:58.271Z https://docs.axolotl.ai/docs/multimodal.html - 2025-05-28T20:20:30.197Z + 2025-05-30T04:21:58.275Z https://docs.axolotl.ai/docs/multi-node.html - 2025-05-28T20:20:30.197Z + 2025-05-30T04:21:58.275Z https://docs.axolotl.ai/docs/docker.html - 2025-05-28T20:20:30.194Z + 2025-05-30T04:21:58.272Z https://docs.axolotl.ai/docs/amd_hpc.html - 2025-05-28T20:20:30.193Z + 2025-05-30T04:21:58.271Z https://docs.axolotl.ai/docs/dataset_loading.html - 2025-05-28T20:20:30.193Z + 2025-05-30T04:21:58.272Z https://docs.axolotl.ai/docs/qat.html - 2025-05-28T20:20:30.197Z + 2025-05-30T04:21:58.275Z https://docs.axolotl.ai/docs/cli.html - 2025-05-28T20:20:30.193Z + 2025-05-30T04:21:58.271Z https://docs.axolotl.ai/docs/config.html - 2025-05-28T20:20:30.193Z + 2025-05-30T04:21:58.271Z https://docs.axolotl.ai/docs/unsloth.html - 2025-05-28T20:20:30.197Z + 2025-05-30T04:21:58.275Z https://docs.axolotl.ai/docs/ray-integration.html - 2025-05-28T20:20:30.197Z + 2025-05-30T04:21:58.275Z https://docs.axolotl.ai/docs/dataset-formats/stepwise_supervised.html - 2025-05-28T20:20:30.193Z + 2025-05-30T04:21:58.272Z https://docs.axolotl.ai/docs/dataset-formats/index.html - 2025-05-28T20:20:30.193Z + 2025-05-30T04:21:58.271Z https://docs.axolotl.ai/docs/dataset-formats/tokenized.html - 2025-05-28T20:20:30.193Z + 2025-05-30T04:21:58.272Z 
https://docs.axolotl.ai/docs/installation.html - 2025-05-28T20:20:30.196Z + 2025-05-30T04:21:58.275Z https://docs.axolotl.ai/docs/inference.html - 2025-05-28T20:20:30.196Z + 2025-05-30T04:21:58.275Z https://docs.axolotl.ai/docs/mac.html - 2025-05-28T20:20:30.197Z + 2025-05-30T04:21:58.275Z https://docs.axolotl.ai/docs/getting-started.html - 2025-05-28T20:20:30.194Z + 2025-05-30T04:21:58.272Z https://docs.axolotl.ai/docs/reward_modelling.html - 2025-05-28T20:20:30.197Z + 2025-05-30T04:21:58.275Z