Compare commits
56 commits: feat/wizar… vs. v0.9.2
| Author | SHA1 | Date |
|---|---|---|
| | 54bbc9bb72 | |
| | 5aefebe1fe | |
| | 5a36b6ff2d | |
| | 224da88fa2 | |
| | 493eb8e5c6 | |
| | 4780ac7c4d | |
| | cf69de2eb9 | |
| | 27e3329273 | |
| | 27fec49083 | |
| | 8cda9e93c1 | |
| | 17d715c2b3 | |
| | f943306263 | |
| | 3c8b9b33d6 | |
| | 8b0c2a71ad | |
| | 493910559a | |
| | c54534dbfa | |
| | cae5cebb59 | |
| | fcbd7477d0 | |
| | 038db85a40 | |
| | 680dcc5a4d | |
| | fed5ca8254 | |
| | 7a2d017c88 | |
| | 8c0303aa5e | |
| | 5d61169f7c | |
| | e1586f7919 | |
| | e4bf3ffb17 | |
| | 30150fe1e1 | |
| | 7f7d7ade2e | |
| | 776cf70fe4 | |
| | 8730951aba | |
| | e72c11ad55 | |
| | 1a7978b960 | |
| | 60b0d14f1d | |
| | a7a40378f5 | |
| | b50d35bec9 | |
| | bc6dfa6899 | |
| | 9d6e8af622 | |
| | 17b441248c | |
| | d49a4268b8 | |
| | 1d6e931115 | |
| | ff106ace44 | |
| | 24907533d1 | |
| | 0e9d816d2e | |
| | 72f142186a | |
| | 87726322bf | |
| | ae8ae7534c | |
| | ee00142cb5 | |
| | 097e7e3b5b | |
| | c714958181 | |
| | 4402c293dc | |
| | 0d71f787a3 | |
| | c337ca0872 | |
| | f04f7cf5ad | |
| | c64a951bc9 | |
| | fc88cc56cb | |
| | e85cbb8645 | |
.github/workflows/tests.yml (vendored): 6 changes
@@ -347,12 +347,6 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.6.0
-            num_gpus: 1
-            axolotl_extras: llmcompressor
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
_quarto.yml: 20 changes
@@ -48,23 +48,8 @@ quartodoc:
     contents:
       - core.trainers.base
       - core.trainers.trl
-      - core.trainers.mamba
-      - core.trainers.relora
       - core.trainers.dpo.trainer
       - core.trainers.grpo.trainer
-      - core.trainers.grpo.sampler
-      - core.trainers.utils
-  - title: Mixins
-    desc: Mixin classes for augmenting trainers
-    contents:
-      - core.trainers.mixins.optimizer
-      - core.trainers.mixins.rng_state_loader
-      - core.trainers.mixins.scheduler
-      - core.trainers.mixins.sequence_parallel
-  - title: Context Managers
-    desc: Context managers for altering trainer behaviors
-    contents:
-      - utils.ctx_managers.sequence_parallel
   - title: Prompt Strategies
     desc: Prompt formatting strategies
     contents:
@@ -101,7 +86,7 @@ quartodoc:
       - kernels.swiglu
       - kernels.quantize
       - kernels.utils
-  - title: Monkey Patches
+  - title: MonkeyPatches
     desc: Runtime patches for model optimizations
     contents:
       - monkeypatch.llama_attn_hijack_flash
@@ -139,7 +124,8 @@ quartodoc:
       - utils.optimizers.adopt
       - utils.data.pretraining
       - utils.data.sft
-      - utils.gradient_checkpointing.unsloth
+      - utils.gradient_checkpointing.offload_cpu
+      - utils.gradient_checkpointing.offload_disk
   - title: Schemas
     desc: Pydantic data models for Axolotl config
     contents:
@@ -539,7 +539,7 @@ train_on_inputs: false
 # Note that training loss may have an oscillating pattern with this enabled.
 group_by_length: false
 
-# Whether to use gradient checkpointing. Available options are: true, false, "offload".
+# Whether to use gradient checkpointing. Available options are: true, false, "offload", "offload_disk".
 # https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing
 gradient_checkpointing: false
 # additional kwargs to pass to the trainer for gradient checkpointing
@@ -49,8 +49,7 @@ sections = [
     ("Knowledge Distillation (KD)", "kd"),
     ("Liger Kernels", "liger"),
     ("Language Model Evaluation Harness (LM Eval)", "lm_eval"),
-    ("Spectrum", "spectrum"),
-    ("LLMCompressor", "llm_compressor")
+    ("Spectrum", "spectrum")
 ]
 
 for section_name, folder_name in sections:
@@ -3,6 +3,8 @@ title: Sequence Parallelism
 description: Train with long sequences split across multiple GPUs.
 ---
 
+# Sequence Parallelism
+
 Sequence parallelism is a technique that splits sequences across multiple GPUs,
 allowing you to train with very long sequences that wouldn't fit on a single GPU. Each
 GPU processes a different portion of the sequence, and the results are aggregated
@@ -25,7 +27,7 @@ To enable sequence parallelism, add the following to your configuration file:
 sequence_parallel_degree: 4 # Split sequences across 4 GPUs
 # Optional; strides across the key dimension. Larger values use more memory but should make training faster.
 heads_k_stride: 1
-# Optional; one of "varlen_llama3" or "batch_ring". Defaults to
+# Optional; one of "varlen_llama3", "batch_ring", "batch_zigzag", "batch_stripe". Defaults to
 # "varlen_llama3" when `sample_packing: true`, and "batch_ring" otherwise.
 ring_attn_func:
 ```
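The documentation diffed above describes each GPU processing a different portion of the sequence. A minimal conceptual sketch of that sharding idea follows; it is illustrative only and is not Axolotl's ring-attention implementation:

```python
import torch

def shard_sequence(input_ids: torch.Tensor, rank: int, sp_degree: int) -> torch.Tensor:
    """Return the contiguous slice of the sequence assigned to this rank."""
    seq_len = input_ids.shape[1]
    assert seq_len % sp_degree == 0, "sequence length must divide evenly across ranks"
    chunk = seq_len // sp_degree
    return input_ids[:, rank * chunk : (rank + 1) * chunk]

# toy example: a (1, 8) batch split across 4 ranks -> each rank sees 2 tokens
x = torch.arange(8).unsqueeze(0)
print([shard_sequence(x, r, 4).tolist() for r in range(4)])
```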
@@ -1,77 +0,0 @@
-base_model: neuralmagic/Sparse-Llama-3.1-8B-2of4
-
-plugins:
-  - axolotl.integrations.llm_compressor.LLMCompressorPlugin
-
-load_in_8bit: false
-load_in_4bit: false
-strict: false
-
-datasets:
-  - path: tatsu-lab/alpaca
-    type: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.05
-output_dir: ./outputs/out
-
-sequence_len: 4096
-sample_packing: true
-pad_to_sequence_len: true
-eval_sample_packing: false
-
-wandb_project:
-wandb_entity:
-wandb_watch:
-wandb_name:
-wandb_log_model:
-
-gradient_accumulation_steps: 8
-micro_batch_size: 1
-num_epochs: 1
-optimizer: paged_adamw_8bit
-lr_scheduler: cosine
-learning_rate: 2e-5
-
-train_on_inputs: false
-group_by_length: false
-bf16: auto
-fp16:
-tf32: false
-
-gradient_checkpointing: true
-gradient_checkpointing_kwargs:
-  use_reentrant: false
-early_stopping_patience:
-resume_from_checkpoint:
-logging_steps: 1
-xformers_attention:
-flash_attention: true
-
-warmup_steps: 100
-evals_per_epoch: 2
-eval_table_size:
-saves_per_epoch: 1
-debug:
-deepspeed:
-weight_decay: 0.0
-fsdp:
-fsdp_config:
-special_tokens:
-  pad_token: <|end_of_text|>
-
-llmcompressor:
-  recipe:
-    finetuning_stage:
-      finetuning_modifiers:
-        ConstantPruningModifier:
-          targets: [
-            're:.*q_proj.weight',
-            're:.*k_proj.weight',
-            're:.*v_proj.weight',
-            're:.*o_proj.weight',
-            're:.*gate_proj.weight',
-            're:.*up_proj.weight',
-            're:.*down_proj.weight',
-          ]
-          start: 0
-  save_compressed: true
setup.py: 3 changes
@@ -150,9 +150,6 @@ extras_require = {
     "vllm": [
         "vllm==0.7.2",
     ],
-    "llmcompressor": [
-        "llmcompressor==0.5.1",
-    ],
 }
 
 install_requires, dependency_links, extras_require_build = parse_requirements(
@@ -4,4 +4,4 @@ import pkgutil
 
 __path__ = pkgutil.extend_path(__path__, __name__)  # Make this a namespace package
 
-__version__ = "0.10.0.dev0"
+__version__ = "0.9.2"
@@ -342,13 +342,6 @@ def delinearize_llama4(model: str, output: str) -> None:
     do_delinearize_llama4(model, output)
 
 
-@cli.command()
-def wizard():
-    from axolotl.cli.wizard import do_wizard
-
-    do_wizard()
-
-
 cli.add_command(lm_eval)
 
 
@@ -1,429 +0,0 @@
|
|||||||
"""Wizard for creating yaml configs."""
|
|
||||||
|
|
||||||
import click
|
|
||||||
import torch
|
|
||||||
import yaml
|
|
||||||
from packaging import version
|
|
||||||
from transformers.training_args import OptimizerNames
|
|
||||||
|
|
||||||
from axolotl.cli.art import print_axolotl_text_art
|
|
||||||
from axolotl.utils.dict import DictDefault
|
|
||||||
from axolotl.utils.models import load_model_config
|
|
||||||
from axolotl.utils.schemas.enums import CustomSupportedOptimizers
|
|
||||||
|
|
||||||
|
|
||||||
def do_wizard():
|
|
||||||
print_axolotl_text_art()
|
|
||||||
|
|
||||||
# Ask where to save the config
|
|
||||||
cfg = DictDefault({})
|
|
||||||
config_path = click.prompt(
|
|
||||||
"Where do you want to save the config?", type=str, default="config.yaml"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Ask base model
|
|
||||||
base_model = click.prompt("What base model do you want to use?", type=str)
|
|
||||||
cfg["base_model"] = base_model.strip()
|
|
||||||
|
|
||||||
# Ask whether want to enable Vision model
|
|
||||||
# TODO: check if model has vision layers instead of asking user
|
|
||||||
train_vision_model = click.confirm(
|
|
||||||
"If this model has vision layers, do you want to train them?", default=False
|
|
||||||
)
|
|
||||||
|
|
||||||
if train_vision_model:
|
|
||||||
cfg["processor_type"] = "AutoProcessor"
|
|
||||||
cfg["skip_prepare_dataset"] = True
|
|
||||||
cfg["remove_unused_columns"] = False
|
|
||||||
cfg["sample_packing"] = False
|
|
||||||
|
|
||||||
# Ask whether they want to set any advanced model features (custom tokenizer, custom config, etc)
|
|
||||||
advanced_model_features = click.confirm(
|
|
||||||
"Do you want to set any advanced model features? (custom tokenizer, custom config, remote code etc)",
|
|
||||||
default=False,
|
|
||||||
)
|
|
||||||
|
|
||||||
if advanced_model_features:
|
|
||||||
# Ask whether they want to use a custom config
|
|
||||||
base_model_config = click.prompt(
|
|
||||||
"What model config do you want to use? (leave blank for default)",
|
|
||||||
type=str,
|
|
||||||
default="",
|
|
||||||
)
|
|
||||||
|
|
||||||
if base_model_config:
|
|
||||||
cfg["base_model_config"] = base_model_config
|
|
||||||
|
|
||||||
# Ask whether they want to use a specific revision of the model
|
|
||||||
revision_of_model = click.prompt(
|
|
||||||
"What revision of the model do you want to use? (leave blank for default)",
|
|
||||||
type=str,
|
|
||||||
default="",
|
|
||||||
)
|
|
||||||
|
|
||||||
if revision_of_model:
|
|
||||||
cfg["revision_of_model"] = revision_of_model
|
|
||||||
|
|
||||||
# Ask whether they want to use a custom tokenizer
|
|
||||||
tokenizer_config = click.prompt(
|
|
||||||
"What tokenizer do you want to use? (leave blank for default)",
|
|
||||||
type=str,
|
|
||||||
default="",
|
|
||||||
)
|
|
||||||
|
|
||||||
if tokenizer_config:
|
|
||||||
cfg["tokenizer_config"] = tokenizer_config
|
|
||||||
|
|
||||||
# Ask whether they want to use remote code
|
|
||||||
trust_remote_code = click.confirm(
|
|
||||||
"Do you want to use remote code?", default=False
|
|
||||||
)
|
|
||||||
|
|
||||||
if trust_remote_code:
|
|
||||||
cfg["trust_remote_code"] = trust_remote_code
|
|
||||||
|
|
||||||
# Whether to resize token embeddings
|
|
||||||
resize_token_embeddings_to_32x = click.confirm(
|
|
||||||
"Do you want to resize token embeddings to 32x?", default=False
|
|
||||||
)
|
|
||||||
|
|
||||||
if resize_token_embeddings_to_32x:
|
|
||||||
cfg["resize_token_embeddings_to_32x"] = resize_token_embeddings_to_32x
|
|
||||||
|
|
||||||
# Whether to shrink embeddings to len(tokenizer)
|
|
||||||
shrink_embeddings = click.confirm(
|
|
||||||
"Do you want to shrink embeddings to len(tokenizer)?", default=False
|
|
||||||
)
|
|
||||||
|
|
||||||
if shrink_embeddings:
|
|
||||||
cfg["shrink_embeddings"] = shrink_embeddings
|
|
||||||
|
|
||||||
# Whether to skip upcast embeddings
|
|
||||||
embeddings_skip_upcast = click.confirm(
|
|
||||||
"Do you want to skip upcast embeddings?", default=False
|
|
||||||
)
|
|
||||||
|
|
||||||
if embeddings_skip_upcast:
|
|
||||||
cfg["embeddings_skip_upcast"] = embeddings_skip_upcast
|
|
||||||
|
|
||||||
# Whether to random init weights
|
|
||||||
random_init_weights = click.confirm(
|
|
||||||
"Do you want to random init weights?", default=False
|
|
||||||
)
|
|
||||||
|
|
||||||
if random_init_weights:
|
|
||||||
cfg["random_init_weights"] = random_init_weights
|
|
||||||
|
|
||||||
# Get model type
|
|
||||||
config = load_model_config(cfg)
|
|
||||||
model_type = config.model_type
|
|
||||||
|
|
||||||
# Ask sequence length
|
|
||||||
sequence_length = click.prompt("What sequence length do you want to use?", type=int)
|
|
||||||
cfg["sequence_length"] = sequence_length
|
|
||||||
|
|
||||||
# Whether to turn on sample packing
|
|
||||||
if cfg["sample_packing"] is None:
|
|
||||||
cfg["sample_packing"] = click.confirm(
|
|
||||||
"Do you want to turn on sample packing? This will speed up training by packing multiple samples into a single batch.",
|
|
||||||
default=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
if cfg["sample_packing"]:
|
|
||||||
cfg["pad_to_sequence_len"] = True
|
|
||||||
|
|
||||||
# Whether to turn off eval sample packing
|
|
||||||
no_eval_sample_packing = click.confirm(
|
|
||||||
"Do you want to turn off eval sample packing? This will slow down evaluation but is recommended if you are using a small validation set.",
|
|
||||||
default=False,
|
|
||||||
)
|
|
||||||
|
|
||||||
if no_eval_sample_packing:
|
|
||||||
cfg["eval_sample_packing"] = False
|
|
||||||
|
|
||||||
# Hardware check
|
|
||||||
try:
|
|
||||||
is_ampere_or_newer = torch.cuda.get_device_capability()[0] >= 8
|
|
||||||
except RuntimeError:
|
|
||||||
is_ampere_or_newer = False
|
|
||||||
except AssertionError: # this is raised if no cuda is available
|
|
||||||
is_ampere_or_newer = False
|
|
||||||
|
|
||||||
# Get num gpus
|
|
||||||
try:
|
|
||||||
num_gpus = torch.cuda.device_count()
|
|
||||||
except RuntimeError:
|
|
||||||
num_gpus = 0
|
|
||||||
|
|
||||||
# Get torch version
|
|
||||||
torch_version = str(torch.__version__).split("+", maxsplit=1)[0]
|
|
||||||
|
|
||||||
is_torch_2_6_or_newer = version.parse(torch_version) >= version.parse("2.6.0")
|
|
||||||
|
|
||||||
# Whether to turn on attention
|
|
||||||
opt = ["xformers", "sdp"]
|
|
||||||
|
|
||||||
if is_ampere_or_newer:
|
|
||||||
opt.append("flash")
|
|
||||||
|
|
||||||
if is_torch_2_6_or_newer:
|
|
||||||
opt.append("flex")
|
|
||||||
|
|
||||||
if cfg["sample_packing"]:
|
|
||||||
if "flash" in opt:
|
|
||||||
default_opt = "flash"
|
|
||||||
elif "flex" in opt:
|
|
||||||
default_opt = "flex"
|
|
||||||
else:
|
|
||||||
default_opt = opt[0]
|
|
||||||
|
|
||||||
attention = click.prompt(
|
|
||||||
"Which attention backend do you want to use? Sample packing requires an attention backend to be set.",
|
|
||||||
type=click.Choice(opt),
|
|
||||||
default=default_opt,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
# non-sample packing supports no attention and S2
|
|
||||||
opt.extend(["none", "s2"])
|
|
||||||
|
|
||||||
attention = click.prompt(
|
|
||||||
"Which attention backend do you want to use?",
|
|
||||||
type=click.Choice(opt),
|
|
||||||
default="none",
|
|
||||||
)
|
|
||||||
|
|
||||||
if attention == "none":
|
|
||||||
attention = None
|
|
||||||
|
|
||||||
# TODO: if xformers, check if FA is installed
|
|
||||||
# TODO: flex doc mentioned requiring seq len to be divisible by 128. Unclear if limitation still exists
|
|
||||||
|
|
||||||
# TODO: requires #2489
|
|
||||||
cfg["attention"] = attention
|
|
||||||
|
|
||||||
# Whether to turn on gradient checkpointing
|
|
||||||
# TODO: need to wait for offload_disk PR to be merged
|
|
||||||
gradient_checkpointing = click.prompt(
|
|
||||||
"Which gradient checkpointing strategy do you want to use?",
|
|
||||||
type=click.Choice(["none", "true", "offload", "offload_disk"]),
|
|
||||||
default="true",
|
|
||||||
)
|
|
||||||
|
|
||||||
if gradient_checkpointing == "none":
|
|
||||||
gradient_checkpointing = False
|
|
||||||
elif gradient_checkpointing == "true":
|
|
||||||
gradient_checkpointing = True
|
|
||||||
|
|
||||||
# Ask whether to set use_reentrant
|
|
||||||
# TODO: get correct defaults based on SFT/RL mode and single/multigpu
|
|
||||||
# use_reentrant = click.confirm(
|
|
||||||
# "Do you want to set use_reentrant?",
|
|
||||||
# default=True,
|
|
||||||
# )
|
|
||||||
|
|
||||||
# if use_reentrant:
|
|
||||||
# cfg["use_reentrant"] = use_reentrant
|
|
||||||
|
|
||||||
# Optimizer
|
|
||||||
cfg["optimizer"] = click.prompt(
|
|
||||||
"Which optimizer do you want to use?",
|
|
||||||
type=click.Choice((OptimizerNames | CustomSupportedOptimizers)),
|
|
||||||
default=OptimizerNames.ADAMW_TORCH_FUSED,
|
|
||||||
)
|
|
||||||
|
|
||||||
cfg["lr_scheduler"] = click.prompt(
|
|
||||||
"Which learning rate scheduler do you want to use?",
|
|
||||||
type=click.Choice(
|
|
||||||
[
|
|
||||||
"cosine",
|
|
||||||
"one_cycle",
|
|
||||||
"rex",
|
|
||||||
"log_sweep",
|
|
||||||
"linear",
|
|
||||||
"cosine_with_restarts",
|
|
||||||
"polynomial",
|
|
||||||
"constant",
|
|
||||||
"constant_with_warmup",
|
|
||||||
"inverse_sqrt",
|
|
||||||
"reduce_lr_on_plateau",
|
|
||||||
"cosine_with_min_lr",
|
|
||||||
"warmup_stable_decay",
|
|
||||||
]
|
|
||||||
),
|
|
||||||
default="cosine",
|
|
||||||
)
|
|
||||||
|
|
||||||
# Plugins
|
|
||||||
|
|
||||||
cfg["plugins"] = []
|
|
||||||
|
|
||||||
# Whether to turn on cut cross entropy
|
|
||||||
if is_ampere_or_newer:
|
|
||||||
# Note: This may error if users don't have CCE installed
|
|
||||||
from axolotl.integrations.cut_cross_entropy.monkeypatch.patch import (
|
|
||||||
CUT_CROSS_ENTROPY_MODEL_MAPPING,
|
|
||||||
)
|
|
||||||
|
|
||||||
if model_type in CUT_CROSS_ENTROPY_MODEL_MAPPING:
|
|
||||||
cut_cross_entropy = click.confirm(
|
|
||||||
"Do you want to turn on cut cross entropy? This will save VRAM if the model has a large vocab size.",
|
|
||||||
default=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
if cut_cross_entropy:
|
|
||||||
cfg["plugins"].append(
|
|
||||||
"axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin"
|
|
||||||
)
|
|
||||||
|
|
||||||
cfg["cut_cross_entropy"] = True
|
|
||||||
|
|
||||||
use_liger_kernel = click.confirm(
|
|
||||||
"Do you want to use the liger kernel? This will speed up training and save VRAM.",
|
|
||||||
default=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
if use_liger_kernel:
|
|
||||||
cfg["plugins"].append("axolotl.integrations.liger.LigerPlugin")
|
|
||||||
|
|
||||||
cfg["liger_rope"] = click.confirm(
|
|
||||||
"Do you want to enable liger rope?",
|
|
||||||
default=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
cfg["liger_rms_norm"] = click.confirm(
|
|
||||||
"Do you want to enable liger rms norm?",
|
|
||||||
default=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
cfg["liger_glu_activation"] = click.confirm(
|
|
||||||
"Do you want to enable liger glu activation?",
|
|
||||||
default=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
cfg["liger_layer_norm"] = click.confirm(
|
|
||||||
"Do you want to enable liger layer norm?",
|
|
||||||
default=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
if cfg["cut_cross_entropy"] is not True:
|
|
||||||
cfg["liger_fused_linear_cross_entropy"] = click.confirm(
|
|
||||||
"Do you want to enable liger fused linear cross entropy?",
|
|
||||||
default=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
# TODO: lora kernels (but they auto enable via validator already)
|
|
||||||
|
|
||||||
# TODO: is there incompat between torch compile and liger?
|
|
||||||
cfg["torch_compile"] = click.confirm(
|
|
||||||
"Do you want to enable torch compile?",
|
|
||||||
default=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Multi-gpu
|
|
||||||
if num_gpus > 1:
|
|
||||||
# Ask whether to use DDP/Deepspeed/FSDP
|
|
||||||
multi_gpu_mode = click.prompt(
|
|
||||||
"Which multi-gpu mode do you want to use?",
|
|
||||||
type=click.Choice(["ddp", "deepspeed", "fsdp"]),
|
|
||||||
default="ddp",
|
|
||||||
)
|
|
||||||
|
|
||||||
if multi_gpu_mode == "deepspeed":
|
|
||||||
# Ask which deepspeed config to use
|
|
||||||
cfg["deepspeed"] = click.prompt(
|
|
||||||
"Which deepspeed config do you want to use? The higher the number, the more VRAM you will save, but the slower it will run.",
|
|
||||||
type=click.Choice(
|
|
||||||
[
|
|
||||||
"zero1.json",
|
|
||||||
"zero1_torch_compile.json",
|
|
||||||
"zero2.json",
|
|
||||||
"zero3.json",
|
|
||||||
"zero3_bf16.json",
|
|
||||||
"zero3_bf16_cpuoffload_all.json",
|
|
||||||
"zero3_bf16_cpuoffload_params.json",
|
|
||||||
]
|
|
||||||
),
|
|
||||||
default="zero1.json",
|
|
||||||
)
|
|
||||||
elif multi_gpu_mode == "fsdp":
|
|
||||||
fsdp_version = click.prompt(
|
|
||||||
"Which fsdp version do you want to use?",
|
|
||||||
type=click.Choice([1, 2]),
|
|
||||||
default=2,
|
|
||||||
)
|
|
||||||
|
|
||||||
# TODO: Handle FSDP config
|
|
||||||
|
|
||||||
if fsdp_version == 1:
|
|
||||||
cfg["fsdp"] = ["full_shard", "auto_wrap"]
|
|
||||||
|
|
||||||
# Ask which state dict type to use
|
|
||||||
fsdp_state_dict_type = click.prompt(
|
|
||||||
"Which fsdp state dict type do you want to use?",
|
|
||||||
type=click.Choice(["FULL_STATE_DICT", "SHARDED_STATE_DICT"]),
|
|
||||||
default="FULL_STATE_DICT",
|
|
||||||
)
|
|
||||||
|
|
||||||
fsdp_offload_params = click.confirm(
|
|
||||||
"Do you want to offload parameters?",
|
|
||||||
default=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
# TODO: can we load the model class and auto pull a default for this?
|
|
||||||
fsdp_transformer_layer_cls_to_wrap = click.prompt(
|
|
||||||
"Which transformer layer class to wrap? It is usually the Decoder layer class.",
|
|
||||||
type=str,
|
|
||||||
)
|
|
||||||
|
|
||||||
# TODO: add other options
|
|
||||||
|
|
||||||
cfg["fsdp_config"] = {
|
|
||||||
"state_dict_type": fsdp_state_dict_type,
|
|
||||||
"offload_params": fsdp_offload_params,
|
|
||||||
"transformer_layer_cls_to_wrap": fsdp_transformer_layer_cls_to_wrap,
|
|
||||||
}
|
|
||||||
|
|
||||||
elif fsdp_version == 2:
|
|
||||||
raise NotImplementedError()
|
|
||||||
|
|
||||||
# Training mode (sft or rl)
|
|
||||||
training_mode = click.prompt(
|
|
||||||
"Which training mode do you want to use?",
|
|
||||||
type=click.Choice(["sft", "rl"]),
|
|
||||||
default="sft",
|
|
||||||
)
|
|
||||||
|
|
||||||
if training_mode == "rl":
|
|
||||||
cfg["rl"] = click.prompt(
|
|
||||||
"Which rl mode do you want to use?",
|
|
||||||
type=click.Choice(["dpo", "ipo", "orpo", "kto", "grpo", "simpo"]),
|
|
||||||
)
|
|
||||||
|
|
||||||
# TODO: handle RL options
|
|
||||||
|
|
||||||
# Whether to use adapter
|
|
||||||
|
|
||||||
# Get batch/grad accu
|
|
||||||
|
|
||||||
# Get learning rate
|
|
||||||
|
|
||||||
# Get weight decay
|
|
||||||
|
|
||||||
# Get max grad norm
|
|
||||||
|
|
||||||
# Get num train epochs
|
|
||||||
|
|
||||||
# Get warmup ratio
|
|
||||||
|
|
||||||
# Get save ratio
|
|
||||||
|
|
||||||
# Get eval ratio
|
|
||||||
|
|
||||||
# Get dataset config
|
|
||||||
|
|
||||||
# Load metric tracker
|
|
||||||
|
|
||||||
# Save config to yaml
|
|
||||||
# TODO: improve output yaml formatting. Need to add comments to help separate sections
|
|
||||||
with open(config_path, "w", encoding="utf-8") as f:
|
|
||||||
yaml.dump(cfg.to_dict(), f, sort_keys=False)
|
|
||||||
@@ -14,7 +14,6 @@ from axolotl.utils.data import prepare_dataset
 from axolotl.utils.data.rl import load_prepare_preference_datasets
 from axolotl.utils.dict import DictDefault
 from axolotl.utils.models import load_processor, load_tokenizer
-from axolotl.utils.schemas.enums import RLType
 from axolotl.utils.tokenization import check_dataset_labels
 
 LOG = logging.getLogger(__name__)
@@ -134,7 +133,7 @@ def load_preference_datasets(
     total_num_steps: Optional[int] = int(
         math.ceil(len(train_dataset) * cfg.num_epochs / cfg.batch_size)
     )
-    if cfg.rl is RLType.GRPO:
+    if cfg.rl == "grpo":
         total_num_steps = None
 
     if cli_args.debug or cfg.debug:
@@ -87,7 +87,7 @@ from axolotl.utils.collators import (
 )
 from axolotl.utils.collators.mm_chat import MultiModalChatDataCollator
 from axolotl.utils.models import ensure_dtype
-from axolotl.utils.schemas.enums import CustomSupportedOptimizers, RLType
+from axolotl.utils.schemas.enums import CustomSupportedOptimizers
 
 try:
     import torch._dynamo  # pylint: disable=ungrouped-imports
@@ -353,7 +353,7 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
         training_arguments_kwargs["warmup_steps"] = warmup_steps
         training_arguments_kwargs["logging_steps"] = logging_steps
 
-        if self.cfg.seed is not None:
+        if self.cfg.seed:
             training_arguments_kwargs["seed"] = self.cfg.seed
 
         if self.cfg.gradient_checkpointing:
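A minimal sketch of why the two forms of the seed check above differ: a truthiness test treats a seed of 0 as unset, while an explicit None check preserves it. These are toy functions, not Axolotl code:

```python
def pick_seed_truthy(seed):
    return seed if seed else "framework default"

def pick_seed_explicit(seed):
    return seed if seed is not None else "framework default"

print(pick_seed_truthy(0))    # "framework default" -- seed 0 is silently dropped
print(pick_seed_explicit(0))  # 0 -- seed 0 is honored
```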
@@ -547,6 +547,8 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
         report_to = []
         if self.cfg.use_wandb:
             report_to.append("wandb")
+            if self.cfg.wandb_name:
+                training_arguments_kwargs["run_name"] = self.cfg.wandb_name
         if self.cfg.use_mlflow:
             report_to.append("mlflow")
         if self.cfg.use_tensorboard:
@@ -819,15 +821,14 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
         data_collator_kwargs = {
             "padding": True,  # True/"longest" is the default
         }
-        multiple = 64
         if self.cfg.pad_to_sequence_len:
-            data_collator_kwargs["pad_to_multiple_of"] = multiple * math.ceil(
-                self.cfg.sequence_len / multiple
+            data_collator_kwargs["pad_to_multiple_of"] = 64 * math.ceil(
+                self.cfg.sequence_len / 64
             )
         else:
             # A100 is best at 64, while others at 8. Let's use the larger so we don't have to check
             # https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html
-            data_collator_kwargs["pad_to_multiple_of"] = multiple
+            data_collator_kwargs["pad_to_multiple_of"] = 64
 
         if self.cfg.reward_model:
             data_collator_kwargs["max_length"] = self.cfg.sequence_len
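The pad_to_multiple_of computation in the hunk above reduces to rounding the sequence length up to the next multiple of 64. A standalone sketch with illustrative values:

```python
import math

def pad_to_multiple_of(sequence_len: int, multiple: int = 64) -> int:
    """Smallest multiple of `multiple` that is >= sequence_len."""
    return multiple * math.ceil(sequence_len / multiple)

print(pad_to_multiple_of(4096))  # 4096 (already a multiple of 64)
print(pad_to_multiple_of(4100))  # 4160
```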
@@ -1033,10 +1034,6 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
         training_args_kwargs["dataloader_prefetch_factor"] = (
             self.cfg.dataloader_prefetch_factor
         )
-
-        if self.cfg.seed is not None:
-            training_args_kwargs["seed"] = self.cfg.seed
-
         if self.cfg.gradient_checkpointing:
             training_args_kwargs["gradient_checkpointing"] = (
                 self.cfg.gradient_checkpointing
@@ -1079,13 +1076,9 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
         if self.cfg.use_wandb:
             training_args_kwargs["run_name"] = self.cfg.wandb_name
 
-        training_args_kwargs["sequence_parallel_degree"] = (
-            self.cfg.sequence_parallel_degree
-        )
-
         training_args_cls = None
         blocklist_args_kwargs = []
-        if self.cfg.rl is RLType.SIMPO:
+        if self.cfg.rl == "simpo":
             training_args_cls = AxolotlCPOConfig
             training_args_kwargs["loss_type"] = "simpo"
             training_args_kwargs["max_length"] = self.cfg.sequence_len
@@ -1093,13 +1086,13 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
             if self.cfg.cpo_alpha is not None:
                 training_args_kwargs["cpo_alpha"] = self.cfg.cpo_alpha
 
-        elif self.cfg.rl is RLType.ORPO:
+        elif self.cfg.rl == "orpo":
             training_args_cls = AxolotlORPOConfig
             training_args_kwargs["max_length"] = self.cfg.sequence_len
             if self.cfg.max_prompt_len:
                 training_args_kwargs["max_prompt_length"] = self.cfg.max_prompt_len
 
-        elif self.cfg.rl is RLType.KTO:
+        elif self.cfg.rl == "kto":
             training_args_cls = AxolotlKTOConfig
 
             training_args_kwargs["desirable_weight"] = (
@@ -1113,14 +1106,14 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
             if self.cfg.max_prompt_len:
                 training_args_kwargs["max_prompt_length"] = self.cfg.max_prompt_len
 
-        elif self.cfg.rl is RLType.GRPO:
+        elif self.cfg.rl == "grpo":
             training_args_cls = GRPOStrategy.get_training_args_class()
             training_args_kwargs.update(GRPOStrategy.set_training_args_kwargs(self.cfg))
             blocklist_args_kwargs = GRPOStrategy.get_blocklist_args_kwargs()
 
         else:
             training_args_cls = AxolotlDPOConfig
-            if self.cfg.rl is RLType.IPO:
+            if self.cfg.rl == "ipo":
                 training_args_kwargs["loss_type"] = "ipo"
                 training_args_kwargs["max_length"] = self.cfg.sequence_len
                 training_args_kwargs["max_completion_length"] = None
@@ -1163,35 +1156,33 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
 
     def build(self, total_num_steps):
         training_args = self.build_training_arguments(total_num_steps)
-        trainer_kwargs = {}
-        if self.cfg.rl is RLType.IPO:
+        dpo_trainer_kwargs = {}
+        if self.cfg.rl == "ipo":
             if self.cfg.dpo_label_smoothing:
-                trainer_kwargs["label_smoothing"] = self.cfg.dpo_label_smoothing
+                dpo_trainer_kwargs["label_smoothing"] = self.cfg.dpo_label_smoothing
         if self.eval_dataset:
-            trainer_kwargs["eval_dataset"] = self.eval_dataset
+            dpo_trainer_kwargs["eval_dataset"] = self.eval_dataset
         if self.cfg.adapter and self.peft_config:
-            trainer_kwargs["peft_config"] = self.peft_config
+            dpo_trainer_kwargs["peft_config"] = self.peft_config
         if self.cfg.precompute_ref_log_probs is not None:
-            trainer_kwargs["precompute_ref_log_probs"] = (
+            dpo_trainer_kwargs["precompute_ref_log_probs"] = (
                 self.cfg.precompute_ref_log_probs
             )
-        if self.cfg.rl is RLType.GRPO:
-            trainer_cls = GRPOStrategy.get_trainer_class(
-                sequence_parallel=self.cfg.sequence_parallel_degree > 1
-            )
+        if self.cfg.rl == "grpo":
+            trainer_cls = GRPOStrategy.get_trainer_class()
             trainer_cls_args = [self.model]
             trainer_cls_args.extend(GRPOStrategy.set_trainer_args(self.cfg))
-            trainer_kwargs.update(GRPOStrategy.set_trainer_kwargs(self.cfg))
-        elif self.cfg.rl in [RLType.DPO, RLType.IPO]:
+            dpo_trainer_kwargs.update(GRPOStrategy.set_trainer_kwargs(self.cfg))
+        elif self.cfg.rl in ["dpo", "ipo"]:
             trainer_cls = DPOStrategy.get_trainer_class()
             trainer_cls_args = [self.model, self.model_ref]
-        elif self.cfg.rl is RLType.ORPO:
+        elif self.cfg.rl == "orpo":
             trainer_cls = AxolotlORPOTrainer
             trainer_cls_args = [self.model]
-        elif self.cfg.rl is RLType.KTO:
+        elif self.cfg.rl in ["kto"]:
             trainer_cls = AxolotlKTOTrainer
             trainer_cls_args = [self.model]
-        elif self.cfg.rl is RLType.SIMPO:
+        elif self.cfg.rl in ["simpo"]:
             trainer_cls = AxolotlCPOTrainer
             trainer_cls_args = [self.model]
         else:
@@ -1203,33 +1194,33 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
 
         sig = inspect.signature(trainer_cls)
         if "tokenizer" in sig.parameters.keys():
-            trainer_kwargs["tokenizer"] = self.tokenizer
+            dpo_trainer_kwargs["tokenizer"] = self.tokenizer
         else:
-            trainer_kwargs["processing_class"] = self.tokenizer
+            dpo_trainer_kwargs["processing_class"] = self.tokenizer
 
         if self.cfg.datasets is not None and (
             trainer_cls is DPOStrategy.get_trainer_class()
         ):
-            trainer_kwargs["dataset_tags"] = [
+            dpo_trainer_kwargs["dataset_tags"] = [
                 d["path"] for d in self.cfg.datasets if not Path(d["path"]).is_dir()
             ]
-        trainer = trainer_cls(
+        dpo_trainer = trainer_cls(
             *trainer_cls_args,
             args=training_args,
             train_dataset=self.train_dataset,
             callbacks=self.get_callbacks(),
-            **trainer_kwargs,
+            **dpo_trainer_kwargs,
         )
         if self.cfg.fsdp:
-            ensure_dtype(trainer.model, dtype=self.cfg.torch_dtype)
-            if self.cfg.rl in [RLType.DPO, RLType.IPO] and trainer.ref_model:
-                ensure_dtype(trainer.ref_model, dtype=self.cfg.torch_dtype)
+            ensure_dtype(dpo_trainer.model, dtype=self.cfg.torch_dtype)
+            if self.cfg.rl in ["dpo", "ipo"] and dpo_trainer.ref_model:
+                ensure_dtype(dpo_trainer.ref_model, dtype=self.cfg.torch_dtype)
 
-        trainer = self.hook_post_create_trainer(trainer)
-        for callback in self.get_post_trainer_create_callbacks(trainer):
-            trainer.add_callback(callback)
+        dpo_trainer = self.hook_post_create_trainer(dpo_trainer)
+        for callback in self.get_post_trainer_create_callbacks(dpo_trainer):
+            dpo_trainer.add_callback(callback)
 
-        return trainer
+        return dpo_trainer
 
 
 class HFPPOTrainerBuilder(TrainerBuilderBase):
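The hunks above swap identity checks against RLType members for plain string comparisons. A minimal sketch of how both forms can match, assuming RLType is a str-backed enum (an assumption; its real definition is not part of this diff):

```python
from enum import Enum

# RLType here is a stand-in defined only for illustration; the real enum lives
# in axolotl.utils.schemas.enums and may differ.
class RLType(str, Enum):
    DPO = "dpo"
    GRPO = "grpo"

rl = RLType.GRPO
print(rl is RLType.GRPO)  # True: enum members are singletons, so identity works
print(rl == "grpo")       # True for a str-backed enum, matching the raw string form
```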
@@ -5,7 +5,7 @@
 
 from .base import AxolotlTrainer
 from .dpo.trainer import AxolotlDPOTrainer
-from .grpo.trainer import AxolotlGRPOSequenceParallelTrainer, AxolotlGRPOTrainer
+from .grpo.trainer import AxolotlGRPOTrainer
 from .mamba import AxolotlMambaTrainer
 from .relora import ReLoRATrainer
 from .trl import (
@@ -373,13 +373,15 @@ class AxolotlTrainer(
                 num_items_in_batch=num_items_in_batch,
             )
 
-        return super().compute_loss(
+        loss = super().compute_loss(
             model,
             inputs,
             return_outputs=return_outputs,
             num_items_in_batch=num_items_in_batch,
         )
 
+        return loss
+
     @staticmethod
     def orpo_concatenate_inputs(inputs, label_pad_token=-100, pad_token=0, device=None):
         concatenated_batch = {}
@@ -1,11 +1,14 @@
-"""DPO Specific Strategy for training"""
+"""
+DPO Specific Strategy for training
+"""
 
 from axolotl.core.trainers.dpo.trainer import AxolotlDPOTrainer
-from axolotl.utils.schemas.enums import RLType
 
 
 class DPOStrategy:
-    """Strategy for DPO training"""
+    """
+    Strategy for DPO training
+    """
 
     @classmethod
     def get_trainer_class(cls):
@@ -20,7 +23,7 @@ class DPOStrategy:
     @classmethod
     def set_training_args_kwargs(cls, cfg):
         training_args_kwargs = {}
-        if cfg.rl is RLType.IPO:
+        if cfg.rl == "ipo":
             training_args_kwargs["loss_type"] = "ipo"
             training_args_kwargs["max_length"] = cfg.sequence_len
             training_args_kwargs["max_completion_length"] = None
@@ -1,41 +1,37 @@
-"""GRPO Specific Strategy for training"""
+"""
+GRPO Specific Strategy for training
+"""
 
 import importlib
 import inspect
 import logging
-from typing import Any
 
 from trl.trainer.grpo_trainer import RewardFunc
 
-from axolotl.core.trainers.grpo.args import AxolotlGRPOConfig
-from axolotl.core.trainers.grpo.trainer import (
-    AxolotlGRPOSequenceParallelTrainer,
-    AxolotlGRPOTrainer,
-)
-from axolotl.utils.dict import DictDefault
+from axolotl.core.trainers.grpo.trainer import AxolotlGRPOTrainer
 from axolotl.utils.schemas.trl import TRLConfig
 
-LOG = logging.getLogger(__name__)
+LOG = logging.getLogger("axolotl")
 
 
 class GRPOStrategy:
-    """Strategy for GRPO training"""
+    """
+    Strategy for GRPO training
+    """
 
     @classmethod
-    def get_trainer_class(
-        cls, sequence_parallel: bool
-    ) -> type[AxolotlGRPOTrainer] | type[AxolotlGRPOSequenceParallelTrainer]:
-        if sequence_parallel:
-            return AxolotlGRPOSequenceParallelTrainer
+    def get_trainer_class(cls):
         return AxolotlGRPOTrainer
 
     @classmethod
-    def get_training_args_class(cls) -> type[AxolotlGRPOConfig]:
+    def get_training_args_class(cls):
+        from axolotl.core.trainers.grpo.args import AxolotlGRPOConfig
+
         return AxolotlGRPOConfig
 
     @classmethod
-    def set_training_args_kwargs(cls, cfg: DictDefault) -> dict[str, Any]:
-        grpo_args_kwargs: dict[str, Any] = {}
+    def set_training_args_kwargs(cls, cfg):
+        grpo_args_kwargs = {}
 
         if not hasattr(cfg, "trl") or not cfg.trl:
             return grpo_args_kwargs
@@ -44,8 +40,8 @@ class GRPOStrategy:
 
         if trl.use_vllm:
             grpo_args_kwargs["use_vllm"] = trl.use_vllm
-            grpo_args_kwargs["vllm_server_host"] = trl.vllm_server_host or trl.vllm.host  # type: ignore[attr-defined]
-            grpo_args_kwargs["vllm_server_port"] = trl.vllm_server_port or trl.vllm.port  # type: ignore[attr-defined]
+            grpo_args_kwargs["vllm_server_host"] = trl.vllm_server_host or trl.vllm.host
+            grpo_args_kwargs["vllm_server_port"] = trl.vllm_server_port or trl.vllm.port
             if trl.vllm_server_timeout:
                 grpo_args_kwargs["vllm_server_timeout"] = trl.vllm_server_timeout
             if trl.vllm_guided_decoding_regex:
@@ -106,18 +102,17 @@ class GRPOStrategy:
         return grpo_args_kwargs
 
     @classmethod
-    def set_trainer_args(cls, cfg: DictDefault) -> list[Any]:
+    def set_trainer_args(cls, cfg):
         trainer_args = []
         if cfg.trl and cfg.trl.reward_funcs:
             reward_funcs = []
             for reward_func_fqn in cfg.trl.reward_funcs:
                 reward_funcs.append(cls.get_reward_func(reward_func_fqn))
             trainer_args.append(reward_funcs)
 
         return trainer_args
 
     @classmethod
-    def set_trainer_kwargs(cls, cfg: DictDefault) -> dict[str, Any]:
+    def set_trainer_kwargs(cls, cfg):
         trainer_kwargs = {}
         if cfg.trl and cfg.trl.reward_processing_classes:
             trainer_kwargs["reward_processing_classes"] = (
@@ -131,7 +126,7 @@ class GRPOStrategy:
         return None
 
     @classmethod
-    def get_blocklist_args_kwargs(cls) -> list[str]:
+    def get_blocklist_args_kwargs(cls):
         return ["dataset_num_proc"]
 
     @classmethod
@@ -142,13 +137,13 @@ class GRPOStrategy:
         Args:
             reward_func_fqn (str): Fully qualified name of the reward function (e.g. r1_grpo.gsm8k_transform),
                 or a HF hub path to the reward model.
+        Raises:
+            ValueError: If the reward function does not accept at least two arguments.
 
         Returns:
             RewardFunc: A callable that accepts prompts and completions and returns rewards,
                 or a path to a reward model.
 
-        Raises:
-            ValueError: If the reward function does not accept at least two arguments.
         """
         try:
             # use importlib to dynamically load the reward function from the module
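For reference, a hypothetical reward function of the shape GRPOStrategy.get_reward_func imports by fully qualified name; the function name and keyword arguments below are illustrative, not from this diff. Per the docstring above, it takes prompts and completions and returns one reward per completion:

```python
# Hypothetical module "my_rewards.py"; reference it in config as "my_rewards.length_penalty".
def length_penalty(prompts, completions, **kwargs):
    """Return one float per completion; shorter completions score higher."""
    return [1.0 / (1.0 + len(completion)) for completion in completions]
```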
@@ -11,4 +11,6 @@ from axolotl.core.training_args import AxolotlTrainingMixins
 
 @dataclass
 class AxolotlGRPOConfig(AxolotlTrainingMixins, GRPOConfig):
-    """Axolotl GRPO Config for GRPO training"""
+    """
+    Axolotl GRPO Config for GRPO training
+    """
@@ -1,172 +0,0 @@
|
|||||||
"""Repeat random sampler (similar to the one implemented in
|
|
||||||
https://github.com/huggingface/trl/blob/main/trl/trainer/grpo_trainer.py) that adds
|
|
||||||
sequence parallelism functionality; i.e., duplicating data across ranks in the same
|
|
||||||
sequence parallel group.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Iterator, Sized
|
|
||||||
|
|
||||||
import torch
|
|
||||||
from torch.utils.data import Sampler
|
|
||||||
|
|
||||||
|
|
||||||
class SequenceParallelRepeatRandomSampler(Sampler):
|
|
||||||
"""Sampler for GRPO training with sequence parallelism.
|
|
||||||
|
|
||||||
This sampler ensures:
|
|
||||||
- Ranks in the same sequence parallel (SP) group receive identical data.
|
|
||||||
- Each index is repeated multiple times for sampling different completions.
|
|
||||||
- Entire batches are repeated for reuse in multiple updates.
|
|
||||||
- Data is properly distributed across SP groups.
|
|
||||||
|
|
||||||
In the table below, the values represent dataset indices. Each SP group has
|
|
||||||
`sequence_parallel_degree = 2` GPUs working together on the same data. There are 2
|
|
||||||
SP groups (SP0 and SP1), with `world_size = 4` total GPUs.
|
|
||||||
|
|
||||||
Sequence Parallel Groups
|
|
||||||
| SP0 | SP1 |
|
|
||||||
| GPU 0 | GPU 1 | GPU 2 | GPU 3 |
|
|
||||||
global_step step <---> mini_repeat_count=3
|
|
||||||
<----------> batch_size=2 per SP group
|
|
||||||
grad_accum=2 ▲ ▲ 0 0 [0 0 0 1 1 1] [2 2 2 3 3 3] <- SP groups get different data
|
|
||||||
▼ | 0 1 [0 0 0 1 1 1] [2 2 2 3 3 3] <- Same data for each SP group GPU
|
|
||||||
|
|
|
||||||
| 1 2 [0 0 0 1 1 1] [2 2 2 3 3 3] <- Repeat same indices for iterations
|
|
||||||
num_iterations=2 ▼ 1 3 [0 0 0 1 1 1] [2 2 2 3 3 3] <- When using gradient accumulation
|
|
||||||
|
|
||||||
2 4 [4 4 4 5 5 5] [6 6 6 7 7 7] <- New batch of data indices
|
|
||||||
2 5 [4 4 4 5 5 5] [6 6 6 7 7 7]
|
|
||||||
...
|
|
||||||
|
|
||||||
Args:
|
|
||||||
dataset: Dataset to sample from.
|
|
||||||
mini_repeat_count: How many times to repeat each sample immediately.
|
|
||||||
world_size: Total number of processes.
|
|
||||||
rank: Rank of current process.
|
|
||||||
batch_size: Number of samples per batch.
|
|
||||||
repeat_count: How many times to repeat the full sampling process.
|
|
||||||
sequence_parallel_degree: Number of ranks in a sequence parallel group.
|
|
||||||
shuffle: Whether to shuffle the dataset.
|
|
||||||
seed: Random seed for shuffling.
|
|
||||||
drop_last: Whether to drop the last incomplete batch.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
dataset: Sized,
|
|
||||||
mini_repeat_count: int,
|
|
||||||
world_size: int,
|
|
||||||
rank: int,
|
|
||||||
batch_size: int = 1,
|
|
||||||
repeat_count: int = 1,
|
|
||||||
sequence_parallel_degree: int = 1,
|
|
||||||
shuffle: bool = True,
|
|
||||||
seed: int = 0,
|
|
||||||
drop_last: bool = False,
|
|
||||||
):
|
|
||||||
self.dataset = dataset
|
|
||||||
self.mini_repeat_count = mini_repeat_count
|
|
||||||
self.batch_size = batch_size
|
|
||||||
self.repeat_count = repeat_count
|
|
||||||
self.shuffle = shuffle
|
|
||||||
self.seed = seed
|
|
||||||
self.drop_last = drop_last
|
|
||||||
self.epoch = 0
|
|
||||||
|
|
||||||
self.world_size = world_size
|
|
||||||
self.rank = rank
|
|
||||||
|
|
||||||
# Sequence parallelism parameters
|
|
||||||
self.sequence_parallel_degree = sequence_parallel_degree
|
|
||||||
self.num_sp_groups = world_size // sequence_parallel_degree
|
|
||||||
self.sp_group_id = rank // sequence_parallel_degree
|
|
||||||
|
|
||||||
# Adjust dataset size for distributed sampling
|
|
||||||
self.num_samples = len(self.dataset)
|
|
||||||
self.total_size = self.num_samples
|
|
||||||
|
|
||||||
# Calculate effective number of samples per SP group
|
|
||||||
if (
|
|
||||||
self.drop_last
|
|
||||||
and self.total_size % (self.num_sp_groups * self.batch_size) != 0
|
|
||||||
):
|
|
||||||
# Drop last incomplete batch if drop_last is True
|
|
||||||
self.num_samples_per_sp_group = (
|
|
||||||
self.total_size // self.batch_size // self.num_sp_groups
|
|
||||||
) * self.batch_size
|
|
||||||
else:
|
|
||||||
# Round up to include last batch if drop_last is False
|
|
||||||
self.num_samples_per_sp_group = (
|
|
||||||
(self.total_size + self.batch_size * self.num_sp_groups - 1)
|
|
||||||
// (self.batch_size * self.num_sp_groups)
|
|
||||||
* self.batch_size
|
|
||||||
)
|
|
||||||
|
|
||||||
if shuffle:
|
|
||||||
self.generator = torch.Generator()
|
|
||||||
self.generator.manual_seed(seed)
|
|
||||||
|
|
||||||
def __iter__(self) -> Iterator[int]:
|
|
||||||
"""Creates iterator over dataset indices.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Iterator that yields indices into the dataset.
|
|
||||||
"""
|
|
||||||
# Deterministically shuffle based on epoch and seed
|
|
||||||
if self.shuffle:
|
|
||||||
indices = torch.randperm(
|
|
||||||
self.num_samples, generator=self.generator
|
|
||||||
).tolist()
|
|
||||||
else:
|
|
||||||
indices = list(range(self.num_samples))
|
|
||||||
|
|
||||||
# Add extra samples to make it evenly divisible by batch_size
|
|
||||||
if len(indices) % self.batch_size != 0:
|
|
||||||
padding = indices[: self.batch_size - len(indices) % self.batch_size]
|
|
||||||
indices += padding
|
|
||||||
|
|
||||||
# Subsample based on SP group ID
|
|
||||||
# Each SP group gets distinct batches of data
|
|
||||||
batch_indices = []
|
|
||||||
for i in range(0, len(indices), self.batch_size * self.num_sp_groups):
|
|
||||||
start_idx = i + self.sp_group_id * self.batch_size
|
|
||||||
end_idx = min(start_idx + self.batch_size, len(indices))
|
|
||||||
if start_idx < len(indices):
|
|
||||||
for j in range(self.batch_size):
|
|
||||||
if start_idx + j < end_idx:
|
|
||||||
batch_indices.append(indices[start_idx + j])
|
|
||||||
|
|
||||||
# Make sure batch_indices is exactly batch_size * num_batches_per_sp_group
|
|
||||||
if self.drop_last:
|
|
||||||
num_batches_per_sp_group = self.num_samples_per_sp_group // self.batch_size
|
|
||||||
target_len = self.batch_size * num_batches_per_sp_group
|
|
||||||
if len(batch_indices) > target_len:
|
|
||||||
batch_indices = batch_indices[:target_len]
|
|
||||||
|
|
||||||
# Apply the GRPO repeat pattern
|
|
||||||
final_indices = []
|
|
||||||
for _ in range(self.repeat_count):
|
|
||||||
for idx in batch_indices:
|
|
||||||
for _ in range(self.mini_repeat_count):
|
|
||||||
final_indices.append(idx)
|
|
||||||
|
|
||||||
return iter(final_indices)
|
|
||||||
|
|
||||||
def __len__(self) -> int:
|
|
||||||
"""Returns the total length of the iterable including repetitions.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Total number of samples.
|
|
||||||
"""
|
|
||||||
# Total length including all repetitions
|
|
||||||
return (
|
|
||||||
self.num_samples_per_sp_group * self.mini_repeat_count * self.repeat_count
|
|
||||||
)
|
|
||||||
|
|
||||||
def set_epoch(self, epoch: int) -> None:
|
|
||||||
"""Sets the epoch for this sampler.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
epoch: Epoch number to use for shuffling.
|
|
||||||
"""
|
|
||||||
self.epoch = epoch
|
|
||||||
@@ -1,63 +1,23 @@
-"""Axolotl GRPO trainers (with and without sequence parallelism handling)"""
+"""
+Axolotl GRPO trainer
+"""
 
-# pylint: disable=too-many-lines,duplicate-code,protected-access,no-member
-
-import warnings
 from contextlib import nullcontext
-from typing import Any
 
-import datasets
-import torch
-import torch.distributed as dist
-import torch.utils.data
-from accelerate.utils import (
-    broadcast_object_list,
-    gather,
-    gather_object,
-    is_peft_model,
-)
-from datasets import Dataset, IterableDataset
-from torch import nn
-from torch.utils.data import (
-    BatchSampler,
-    DataLoader,
-    Sampler,
-)
-from transformers import (
-    PreTrainedModel,
-    PreTrainedTokenizerBase,
-    Trainer,
-    TrainerCallback,
-)
-from transformers.trainer_utils import seed_worker
-from transformers.utils import is_peft_available
+from accelerate.utils import is_deepspeed_available, is_peft_model
 from trl import GRPOTrainer
-from trl.data_utils import (
-    apply_chat_template,
-    is_conversational,
-    maybe_apply_chat_template,
-)
-from trl.extras.profiling import profiling_context, profiling_decorator
-from trl.import_utils import is_deepspeed_available
-from trl.models import unwrap_model_for_generation
-from trl.trainer.grpo_config import GRPOConfig
-from trl.trainer.grpo_trainer import RewardFunc, nanstd
-from trl.trainer.utils import pad
+from trl.extras.profiling import profiling_decorator
 
-from axolotl.core.trainers.grpo.sampler import SequenceParallelRepeatRandomSampler
 from axolotl.core.trainers.mixins import RngLoaderMixin, SchedulerMixin
-from axolotl.monkeypatch.attention.ring_attn.patch import get_ring_attn_group
-
-if is_peft_available():
-    # pylint: disable=unused-import
-    from peft import PeftConfig
 
 if is_deepspeed_available():
     import deepspeed
 
 
 class AxolotlGRPOTrainer(RngLoaderMixin, SchedulerMixin, GRPOTrainer):
-    """Extend the base GRPOTrainer for axolotl helpers"""
+    """
+    Extend the base GRPOTrainer for axolotl helpers
+    """
 
     _tag_names = ["trl", "grpo", "axolotl"]
 
@@ -107,600 +67,3 @@ class AxolotlGRPOTrainer(RngLoaderMixin, SchedulerMixin, GRPOTrainer):
|
|||||||
# Reset cache on main process
|
# Reset cache on main process
|
||||||
if self.accelerator.is_main_process:
|
if self.accelerator.is_main_process:
|
||||||
self.vllm_client.reset_prefix_cache()
|
self.vllm_client.reset_prefix_cache()
|
||||||
|
|
||||||
|
|
||||||
class AxolotlGRPOSequenceParallelTrainer(AxolotlGRPOTrainer):
|
|
||||||
"""Extend the base GRPOTrainer for sequence parallelism handling"""
|
|
||||||
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
model: str | PreTrainedModel,
|
|
||||||
reward_funcs: RewardFunc | list[RewardFunc],
|
|
||||||
args: GRPOConfig | None = None,
|
|
||||||
train_dataset: Dataset | IterableDataset | None = None,
|
|
||||||
eval_dataset: (
|
|
||||||
Dataset | IterableDataset | dict[str, Dataset | IterableDataset] | None
|
|
||||||
) = None,
|
|
||||||
processing_class: PreTrainedTokenizerBase | None = None,
|
|
||||||
reward_processing_classes: (
|
|
||||||
PreTrainedTokenizerBase | list[PreTrainedTokenizerBase] | None
|
|
||||||
) = None,
|
|
||||||
callbacks: list[TrainerCallback] | None = None,
|
|
||||||
optimizers: tuple[
|
|
||||||
torch.optim.Optimizer | None, torch.optim.lr_scheduler.LambdaLR | None
|
|
||||||
] = (None, None),
|
|
||||||
peft_config: "PeftConfig | None" = None,
|
|
||||||
):
|
|
||||||
# First call the superclass constructor with all arguments
|
|
||||||
super().__init__(
|
|
||||||
model=model,
|
|
||||||
reward_funcs=reward_funcs,
|
|
||||||
args=args,
|
|
||||||
train_dataset=train_dataset,
|
|
||||||
eval_dataset=eval_dataset,
|
|
||||||
processing_class=processing_class,
|
|
||||||
reward_processing_classes=reward_processing_classes,
|
|
||||||
callbacks=callbacks,
|
|
||||||
optimizers=optimizers,
|
|
||||||
peft_config=peft_config,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Get number of SP groups (number of processes divided by SP degree)
|
|
||||||
num_processes = self.accelerator.num_processes
|
|
||||||
num_sp_groups = num_processes // self.args.sequence_parallel_degree
|
|
||||||
|
|
||||||
# Calculate batch size per SP group (not per process)
|
|
||||||
sp_group_batch_size = self.args.per_device_train_batch_size * num_sp_groups
|
|
||||||
possible_values = [
|
|
||||||
n_gen
|
|
||||||
for n_gen in range(2, sp_group_batch_size + 1)
|
|
||||||
if (sp_group_batch_size) % n_gen == 0
|
|
||||||
]
|
|
||||||
|
|
||||||
if self.num_generations not in possible_values:
|
|
||||||
raise ValueError(
|
|
||||||
f"The batch size per SP group ({num_sp_groups} x "
|
|
||||||
f"{self.args.per_device_train_batch_size}) must be evenly divisible by "
|
|
||||||
f"the number of generations per prompt ({self.num_generations}). Given "
|
|
||||||
"the current configuration, the valid values for the number of "
|
|
||||||
f"generations are: {possible_values}."
|
|
||||||
)
|
|
||||||
|
|
||||||
if self.args.eval_strategy != "no":
|
|
||||||
# If sequence parallelism is enabled, calculate batch size per SP group
|
|
||||||
sp_group_eval_batch_size = args.per_device_eval_batch_size * num_sp_groups # type: ignore[union-attr]
|
|
||||||
possible_values = [
|
|
||||||
n_gen
|
|
||||||
for n_gen in range(2, sp_group_eval_batch_size + 1)
|
|
||||||
if (sp_group_eval_batch_size) % n_gen == 0
|
|
||||||
]
|
|
||||||
|
|
||||||
if self.num_generations not in possible_values:
|
|
||||||
raise ValueError(
|
|
||||||
f"With sequence parallelism (degree {self.args.sequence_parallel_degree}), "
|
|
||||||
f"the eval batch size per SP group ({num_sp_groups} x {self.args.per_device_eval_batch_size}) "
|
|
||||||
f"must be evenly divisible by the number of generations per prompt "
|
|
||||||
f"({self.num_generations}). Given the current eval batch size, "
|
|
||||||
f"the valid values for the number of generations are: {possible_values}."
|
|
||||||
)
|
|
||||||
|
|
||||||
# Initialize the SP group
|
|
||||||
self.sp_group = get_ring_attn_group()
|
|
||||||
self.rank = dist.get_rank()
|
|
||||||
self.world_size = dist.get_world_size()
|
|
||||||
self.local_rank = dist.get_rank(group=self.sp_group)
|
|
||||||
self.local_world_size = dist.get_world_size(group=self.sp_group)
|
|
||||||
|
|
||||||
def _get_train_sampler(self) -> Sampler:
|
|
||||||
effective_batch_size = (
|
|
||||||
self.args.per_device_train_batch_size
|
|
||||||
* self.world_size
|
|
||||||
* self.args.gradient_accumulation_steps
|
|
||||||
)
|
|
||||||
|
|
||||||
return SequenceParallelRepeatRandomSampler(
|
|
||||||
dataset=self.train_dataset,
|
|
||||||
mini_repeat_count=self.num_generations,
|
|
||||||
world_size=self.world_size,
|
|
||||||
rank=self.rank,
|
|
||||||
batch_size=effective_batch_size
|
|
||||||
// self.num_generations
|
|
||||||
// self.args.sequence_parallel_degree,
|
|
||||||
repeat_count=self.num_iterations * self.args.gradient_accumulation_steps,
|
|
||||||
sequence_parallel_degree=self.args.sequence_parallel_degree,
|
|
||||||
shuffle=True,
|
|
||||||
seed=self.args.seed,
|
|
||||||
drop_last=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
def _create_dataloader_params(self, is_eval=False, custom_batch_size=None):
|
|
||||||
"""Create common dataloader parameters for train or eval."""
|
|
||||||
batch_size = custom_batch_size or (
|
|
||||||
self.args.eval_batch_size if is_eval else self._train_batch_size
|
|
||||||
)
|
|
||||||
|
|
||||||
params = {
|
|
||||||
"batch_size": batch_size,
|
|
||||||
"collate_fn": self.data_collator,
|
|
||||||
"num_workers": self.args.dataloader_num_workers,
|
|
||||||
"pin_memory": self.args.dataloader_pin_memory,
|
|
||||||
}
|
|
||||||
|
|
||||||
# Add persistent workers only for training
|
|
||||||
if not is_eval and hasattr(self.args, "dataloader_persistent_workers"):
|
|
||||||
params["persistent_workers"] = self.args.dataloader_persistent_workers
|
|
||||||
|
|
||||||
# Add prefetch factor if specified
|
|
||||||
if self.args.dataloader_prefetch_factor:
|
|
||||||
params["prefetch_factor"] = self.args.dataloader_prefetch_factor
|
|
||||||
|
|
||||||
return params
|
|
||||||
|
|
||||||
def _prepare_dataloader(
|
|
||||||
self, dataset, sampler, is_eval=False, custom_batch_size=None
|
|
||||||
):
|
|
||||||
"""Prepare a dataloader with the given dataset and sampler."""
|
|
||||||
# Get base parameters
|
|
||||||
dataloader_params = self._create_dataloader_params(is_eval, custom_batch_size)
|
|
||||||
|
|
||||||
# Add sampler configuration
|
|
||||||
if not isinstance(dataset, torch.utils.data.IterableDataset):
|
|
||||||
if isinstance(sampler, BatchSampler):
|
|
||||||
# batch_size and batch_sampler are mutually exclusive
|
|
||||||
dataloader_params["batch_sampler"] = sampler
|
|
||||||
del dataloader_params["batch_size"]
|
|
||||||
else:
|
|
||||||
dataloader_params["sampler"] = sampler
|
|
||||||
dataloader_params["drop_last"] = self.args.dataloader_drop_last
|
|
||||||
|
|
||||||
if not is_eval:
|
|
||||||
dataloader_params["worker_init_fn"] = seed_worker
|
|
||||||
|
|
||||||
# Create the dataloader
|
|
||||||
dataloader = DataLoader(dataset, **dataloader_params)
|
|
||||||
|
|
||||||
if self.args.sample_packing and (
|
|
||||||
(not is_eval and not self.args.pretraining)
|
|
||||||
or (is_eval and self.args.eval_sample_packing is not False)
|
|
||||||
):
|
|
||||||
self.accelerator.even_batches = False
|
|
||||||
|
|
||||||
# Return unprepared dataloader if using sequence parallelism
|
|
||||||
# TODO(djsaunde): We might be able to use `accelerate`'s dataloader preparation
|
|
||||||
# if we use `dispatch_batches` and `slice_fn_for_dispatch` properly (i.e.,
|
|
||||||
# slice each batch along the sequence dimension).
|
|
||||||
if self.args.sequence_parallel_degree > 1:
|
|
||||||
return dataloader
|
|
||||||
|
|
||||||
# Otherwise prepare with accelerator
|
|
||||||
return self.accelerator.prepare_data_loader(dataloader)
|
|
||||||
|
|
||||||
def get_train_dataloader(self) -> DataLoader:
|
|
||||||
"""Get dataloader for training"""
|
|
||||||
train_dataset = self.train_dataset
|
|
||||||
# pylint: disable=access-member-before-definition
|
|
||||||
data_collator = self.data_collator # type: ignore
|
|
||||||
|
|
||||||
# Handle dataset preprocessing
|
|
||||||
if isinstance(train_dataset, datasets.Dataset):
|
|
||||||
# Add debug print before any modifications
|
|
||||||
if self.args.sample_packing and not self.args.pretraining:
|
|
||||||
train_dataset = train_dataset.remove_columns(["length"])
|
|
||||||
if not self.args.sample_packing or self.args.pretraining:
|
|
||||||
train_dataset = self._remove_unused_columns(
|
|
||||||
train_dataset, description="training"
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
self.data_collator = self._get_collator_with_removed_columns( # pylint: disable=attribute-defined-outside-init
|
|
||||||
data_collator,
|
|
||||||
description="training",
|
|
||||||
)
|
|
||||||
|
|
||||||
# Get sampler and create dataloader
|
|
||||||
sampler = self._get_train_sampler()
|
|
||||||
dataloader = self._prepare_dataloader(train_dataset, sampler, is_eval=False)
|
|
||||||
|
|
||||||
return dataloader
|
|
||||||
|
|
||||||
def _generate_and_score_completions(
|
|
||||||
self, inputs: list[dict[str, torch.Tensor | Any]]
|
|
||||||
) -> dict[str, torch.Tensor | Any]:
|
|
||||||
device = self.accelerator.device
|
|
||||||
mode = "eval" if self.control.should_evaluate else "train"
|
|
||||||
|
|
||||||
prompts = [x["prompt"] for x in inputs]
|
|
||||||
prompts_text = [
|
|
||||||
maybe_apply_chat_template(example, self.processing_class)["prompt"]
|
|
||||||
for example in inputs
|
|
||||||
]
|
|
||||||
prompt_inputs = self.processing_class(
|
|
||||||
text=prompts_text,
|
|
||||||
return_tensors="pt",
|
|
||||||
padding=True,
|
|
||||||
padding_side="left",
|
|
||||||
add_special_tokens=False,
|
|
||||||
)
|
|
||||||
prompt_inputs = Trainer._prepare_inputs(self, prompt_inputs)
|
|
||||||
prompt_ids, prompt_mask = (
|
|
||||||
prompt_inputs["input_ids"],
|
|
||||||
prompt_inputs["attention_mask"],
|
|
||||||
)
|
|
||||||
|
|
||||||
if self.max_prompt_length is not None:
|
|
||||||
prompt_ids = prompt_ids[:, -self.max_prompt_length :]
|
|
||||||
prompt_mask = prompt_mask[:, -self.max_prompt_length :]
|
|
||||||
|
|
||||||
# Generate completions using either vLLM or regular generation
|
|
||||||
if self.args.use_vllm:
|
|
||||||
# First, have main process load weights if needed
|
|
||||||
# pylint: disable=access-member-before-definition
|
|
||||||
if self.state.global_step != self._last_loaded_step: # type: ignore[has-type]
|
|
||||||
self._move_model_to_vllm()
|
|
||||||
# pylint: disable=attribute-defined-outside-init
|
|
||||||
self._last_loaded_step = self.state.global_step
|
|
||||||
|
|
||||||
# Generate completions using vLLM: gather all prompts and use them in a single call in the main process
|
|
||||||
all_prompts_text = gather_object(prompts_text)
|
|
||||||
if self.accelerator.is_main_process:
|
|
||||||
if self.args.sequence_parallel_degree > 1:
|
|
||||||
# Calculate sequence parallel group information
|
|
||||||
world_size = self.accelerator.num_processes
|
|
||||||
sequence_parallel_degree = self.args.sequence_parallel_degree
|
|
||||||
num_sp_groups = world_size // sequence_parallel_degree
|
|
||||||
|
|
||||||
# Since processes in the same SP group have the same prompts, we need to ensure
|
|
||||||
# we only take one copy of each prompt from each SP group
|
|
||||||
ordered_set_of_prompts = []
|
|
||||||
for sp_group_id in range(num_sp_groups):
|
|
||||||
# Get the first process from each SP group (typically the group leader)
|
|
||||||
group_leader_rank = sp_group_id * sequence_parallel_degree
|
|
||||||
|
|
||||||
# Extract prompts from this SP group, accounting for num_generations duplicates
|
|
||||||
# We only need prompts from one rank in each SP group
|
|
||||||
group_prompts = all_prompts_text[
|
|
||||||
group_leader_rank
|
|
||||||
* len(prompts_text) : (group_leader_rank + 1)
|
|
||||||
* len(prompts_text) : self.num_generations
|
|
||||||
]
|
|
||||||
|
|
||||||
ordered_set_of_prompts.extend(group_prompts)
|
|
||||||
else:
|
|
||||||
# Since 'prompts' contains 'num_generations' duplicates, we first take unique prompts, and generate
|
|
||||||
# num_generations outputs for each one. This is faster than generating outputs for each duplicate
|
|
||||||
# prompt individually.
|
|
||||||
ordered_set_of_prompts = all_prompts_text[
|
|
||||||
:: self.num_generations * self.args.sequence_parallel_degree
|
|
||||||
]
|
|
||||||
|
|
||||||
with profiling_context(self, "vLLM.generate"):
|
|
||||||
completion_ids = self.vllm_client.generate(
|
|
||||||
prompts=ordered_set_of_prompts,
|
|
||||||
n=self.num_generations,
|
|
||||||
repetition_penalty=self.repetition_penalty,
|
|
||||||
temperature=self.temperature,
|
|
||||||
top_p=self.top_p,
|
|
||||||
top_k=-1 if self.top_k is None else self.top_k,
|
|
||||||
min_p=0.0 if self.min_p is None else self.min_p,
|
|
||||||
max_tokens=self.max_completion_length,
|
|
||||||
guided_decoding_regex=self.guided_decoding_regex,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
completion_ids = [None] * (
|
|
||||||
len(all_prompts_text) // self.args.sequence_parallel_degree
|
|
||||||
)
|
|
||||||
|
|
||||||
# Broadcast the completions from the main process to all processes
|
|
||||||
completion_ids = broadcast_object_list(completion_ids, from_process=0)
|
|
||||||
|
|
||||||
# Determine the appropriate slice based on sequence parallelism
|
|
||||||
if self.args.sequence_parallel_degree > 1:
|
|
||||||
# Calculate SP group ID (which group of ranks this rank belongs to)
|
|
||||||
sp_group_id = self.accelerator.process_index // self.local_world_size
|
|
||||||
|
|
||||||
# Calculate the start index for this SP group
|
|
||||||
sp_group_start = sp_group_id * len(prompts) * self.local_world_size
|
|
||||||
|
|
||||||
# All ranks in the same SP group get the same data slice
|
|
||||||
process_slice = slice(
|
|
||||||
sp_group_start,
|
|
||||||
sp_group_start + len(prompts),
|
|
||||||
)
|
|
||||||
completion_ids = completion_ids[process_slice]
|
|
||||||
else:
|
|
||||||
# Original behavior for non-sequence parallel case
|
|
||||||
process_slice = slice(
|
|
||||||
self.accelerator.process_index * len(prompts),
|
|
||||||
(self.accelerator.process_index + 1) * len(prompts),
|
|
||||||
)
|
|
||||||
completion_ids = completion_ids[process_slice]
|
|
||||||
|
|
||||||
# Pad the completions, and concatenate them with the prompts
|
|
||||||
completion_ids = [
|
|
||||||
torch.tensor(ids, device=device) for ids in completion_ids
|
|
||||||
]
|
|
||||||
completion_ids = pad(
|
|
||||||
completion_ids, padding_value=self.processing_class.pad_token_id
|
|
||||||
)
|
|
||||||
prompt_completion_ids = torch.cat([prompt_ids, completion_ids], dim=1)
|
|
||||||
else:
|
|
||||||
# Regular generation path
|
|
||||||
with unwrap_model_for_generation(
|
|
||||||
self.model_wrapped,
|
|
||||||
self.accelerator,
|
|
||||||
gather_deepspeed3_params=self.args.ds3_gather_for_generation,
|
|
||||||
) as unwrapped_model:
|
|
||||||
prompt_completion_ids = unwrapped_model.generate(
|
|
||||||
prompt_ids,
|
|
||||||
attention_mask=prompt_mask,
|
|
||||||
generation_config=self.generation_config,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Compute prompt length and extract completion ids
|
|
||||||
prompt_length = prompt_ids.size(1)
|
|
||||||
prompt_ids = prompt_completion_ids[:, :prompt_length]
|
|
||||||
completion_ids = prompt_completion_ids[:, prompt_length:]
|
|
||||||
|
|
||||||
# Mask everything after the first EOS token
|
|
||||||
is_eos = completion_ids == self.processing_class.eos_token_id
|
|
||||||
eos_idx = torch.full(
|
|
||||||
(is_eos.size(0),), is_eos.size(1), dtype=torch.long, device=device
|
|
||||||
)
|
|
||||||
eos_idx[is_eos.any(dim=1)] = is_eos.int().argmax(dim=1)[is_eos.any(dim=1)]
|
|
||||||
sequence_indices = torch.arange(is_eos.size(1), device=device).expand(
|
|
||||||
is_eos.size(0), -1
|
|
||||||
)
|
|
||||||
completion_mask = (sequence_indices <= eos_idx.unsqueeze(1)).int()
|
|
||||||
|
|
||||||
# If mask_truncated_completions is enabled, zero out truncated completions in completion_mask
|
|
||||||
if self.args.mask_truncated_completions:
|
|
||||||
truncated_completions = ~is_eos.any(dim=1)
|
|
||||||
completion_mask = (
|
|
||||||
completion_mask * (~truncated_completions).unsqueeze(1).int()
|
|
||||||
)
|
|
||||||
|
|
||||||
# Concatenate prompt_mask with completion_mask for logit computation
|
|
||||||
attention_mask = torch.cat([prompt_mask, completion_mask], dim=1) # (B, P+C)
|
|
||||||
|
|
||||||
logits_to_keep = completion_ids.size(
|
|
||||||
1
|
|
||||||
) # we only need to compute the logits for the completion tokens
|
|
||||||
batch_size = (
|
|
||||||
self.args.per_device_train_batch_size
|
|
||||||
if mode == "train"
|
|
||||||
else self.args.per_device_eval_batch_size
|
|
||||||
)
|
|
||||||
|
|
||||||
with torch.no_grad():
|
|
||||||
# When using num_iterations == 1, old_per_token_logps == per_token_logps, so we can skip its
|
|
||||||
# computation here, and use per_token_logps.detach() instead.
|
|
||||||
if self.num_iterations > 1:
|
|
||||||
old_per_token_logps = self._get_per_token_logps(
|
|
||||||
self.model,
|
|
||||||
prompt_completion_ids,
|
|
||||||
attention_mask,
|
|
||||||
logits_to_keep,
|
|
||||||
batch_size,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
old_per_token_logps = None
|
|
||||||
|
|
||||||
if self.beta == 0.0:
|
|
||||||
ref_per_token_logps = None
|
|
||||||
elif self.ref_model is not None:
|
|
||||||
ref_per_token_logps = self._get_per_token_logps(
|
|
||||||
self.ref_model,
|
|
||||||
prompt_completion_ids,
|
|
||||||
attention_mask,
|
|
||||||
logits_to_keep,
|
|
||||||
batch_size,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
with self.accelerator.unwrap_model(self.model).disable_adapter():
|
|
||||||
ref_per_token_logps = self._get_per_token_logps(
|
|
||||||
self.model,
|
|
||||||
prompt_completion_ids,
|
|
||||||
attention_mask,
|
|
||||||
logits_to_keep,
|
|
||||||
batch_size,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Decode the generated completions
|
|
||||||
completions_text = self.processing_class.batch_decode(
|
|
||||||
completion_ids, skip_special_tokens=True
|
|
||||||
)
|
|
||||||
if is_conversational(inputs[0]):
|
|
||||||
completions = []
|
|
||||||
for prompt, completion in zip(prompts, completions_text):
|
|
||||||
bootstrap = (
|
|
||||||
prompt.pop()["content"] if prompt[-1]["role"] == "assistant" else ""
|
|
||||||
)
|
|
||||||
completions.append(
|
|
||||||
[{"role": "assistant", "content": bootstrap + completion}]
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
completions = completions_text
|
|
||||||
|
|
||||||
rewards_per_func = torch.zeros(
|
|
||||||
len(prompts), len(self.reward_funcs), device=device
|
|
||||||
)
|
|
||||||
for i, (reward_func, reward_processing_class, reward_func_name) in enumerate(
|
|
||||||
zip(
|
|
||||||
self.reward_funcs,
|
|
||||||
self.reward_processing_classes,
|
|
||||||
self.reward_func_names,
|
|
||||||
)
|
|
||||||
):
|
|
||||||
with profiling_context(self, reward_func_name):
|
|
||||||
if isinstance(
|
|
||||||
reward_func, nn.Module
|
|
||||||
): # Module instead of PretrainedModel for compat with compiled models
|
|
||||||
if is_conversational(inputs[0]):
|
|
||||||
messages = [
|
|
||||||
{"messages": p + c} for p, c in zip(prompts, completions)
|
|
||||||
]
|
|
||||||
texts = [
|
|
||||||
apply_chat_template(x, reward_processing_class)["text"]
|
|
||||||
for x in messages
|
|
||||||
]
|
|
||||||
else:
|
|
||||||
texts = [p + c for p, c in zip(prompts, completions)]
|
|
||||||
reward_inputs = reward_processing_class(
|
|
||||||
text=texts,
|
|
||||||
return_tensors="pt",
|
|
||||||
padding=True,
|
|
||||||
padding_side="right",
|
|
||||||
add_special_tokens=False,
|
|
||||||
)
|
|
||||||
reward_inputs = Trainer._prepare_inputs(self, reward_inputs)
|
|
||||||
with torch.inference_mode():
|
|
||||||
rewards_per_func[:, i] = reward_func(**reward_inputs).logits[
|
|
||||||
:, 0
|
|
||||||
] # Shape (B*G,)
|
|
||||||
else:
|
|
||||||
# Repeat all input columns (except "prompt" and "completion") to match the number of generations
|
|
||||||
keys = [
|
|
||||||
key for key in inputs[0] if key not in ["prompt", "completion"]
|
|
||||||
]
|
|
||||||
reward_kwargs = {
|
|
||||||
key: [example[key] for example in inputs] for key in keys
|
|
||||||
}
|
|
||||||
output_reward_func = reward_func(
|
|
||||||
prompts=prompts, completions=completions, **reward_kwargs
|
|
||||||
)
|
|
||||||
# Convert None values to NaN
|
|
||||||
output_reward_func = [
|
|
||||||
reward if reward is not None else torch.nan
|
|
||||||
for reward in output_reward_func
|
|
||||||
]
|
|
||||||
|
|
||||||
rewards_per_func[:, i] = torch.tensor(
|
|
||||||
output_reward_func, dtype=torch.float32, device=device
|
|
||||||
)
|
|
||||||
|
|
||||||
# If all reward functions return None for a given row, issue a detailed warning
|
|
||||||
if torch.isnan(rewards_per_func).all(dim=1).any():
|
|
||||||
nan_row_idx = (
|
|
||||||
torch.isnan(rewards_per_func).all(dim=1).nonzero(as_tuple=True)[0][0]
|
|
||||||
)
|
|
||||||
row_reward_kwargs = {
|
|
||||||
key: value[nan_row_idx] for key, value in reward_kwargs.items()
|
|
||||||
}
|
|
||||||
row_reward_kwargs["prompt"] = prompts[nan_row_idx]
|
|
||||||
row_reward_kwargs["completion"] = completions[nan_row_idx]
|
|
||||||
warnings.warn(
|
|
||||||
f"All reward functions returned None for the following kwargs: {row_reward_kwargs}. "
|
|
||||||
"Please ensure that at least one reward function returns a valid reward."
|
|
||||||
)
|
|
||||||
|
|
||||||
# Gather the reward per function: this part is crucial, because the rewards are normalized per group and the
|
|
||||||
# completions may be distributed across processes
|
|
||||||
rewards_per_func = gather(rewards_per_func)
|
|
||||||
|
|
||||||
# Apply weights to each reward function's output and sum
|
|
||||||
rewards = (
|
|
||||||
rewards_per_func * self.reward_weights.to(device).unsqueeze(0)
|
|
||||||
).nansum(dim=1)
|
|
||||||
|
|
||||||
# Compute grouped-wise rewards
|
|
||||||
mean_grouped_rewards = rewards.view(-1, self.num_generations).mean(dim=1)
|
|
||||||
std_grouped_rewards = rewards.view(-1, self.num_generations).std(dim=1)
|
|
||||||
|
|
||||||
# Normalize the rewards to compute the advantages
|
|
||||||
mean_grouped_rewards = mean_grouped_rewards.repeat_interleave(
|
|
||||||
self.num_generations, dim=0
|
|
||||||
)
|
|
||||||
std_grouped_rewards = std_grouped_rewards.repeat_interleave(
|
|
||||||
self.num_generations, dim=0
|
|
||||||
)
|
|
||||||
advantages = rewards - mean_grouped_rewards
|
|
||||||
if self.args.scale_rewards:
|
|
||||||
advantages = advantages / (std_grouped_rewards + 1e-4)
|
|
||||||
|
|
||||||
# Slice to keep only the local part of the data
|
|
||||||
if self.args.sequence_parallel_degree > 1:
|
|
||||||
# Calculate SP group ID (which group of ranks this rank belongs to)
|
|
||||||
sp_group_id = self.accelerator.process_index // self.local_world_size
|
|
||||||
|
|
||||||
# Calculate the start index for this SP group
|
|
||||||
sp_group_start = sp_group_id * len(prompts) * self.local_world_size
|
|
||||||
|
|
||||||
# All ranks in the same SP group get the same data slice
|
|
||||||
process_slice = slice(
|
|
||||||
sp_group_start,
|
|
||||||
sp_group_start + len(prompts),
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
# Original behavior for non-sequence parallel case
|
|
||||||
process_slice = slice(
|
|
||||||
self.accelerator.process_index * len(prompts),
|
|
||||||
(self.accelerator.process_index + 1) * len(prompts),
|
|
||||||
)
|
|
||||||
advantages = advantages[process_slice]
|
|
||||||
|
|
||||||
# Log the metrics
|
|
||||||
if mode == "train":
|
|
||||||
self._total_train_tokens += (
|
|
||||||
self.accelerator.gather_for_metrics(attention_mask.sum()).sum().item()
|
|
||||||
)
|
|
||||||
self._metrics[mode]["num_tokens"] = [self._total_train_tokens]
|
|
||||||
|
|
||||||
# log completion lengths, mean, min, max
|
|
||||||
agg_completion_mask = self.accelerator.gather_for_metrics(
|
|
||||||
completion_mask.sum(1)
|
|
||||||
)
|
|
||||||
self._metrics[mode]["completions/mean_length"].append(
|
|
||||||
agg_completion_mask.float().mean().item()
|
|
||||||
)
|
|
||||||
self._metrics[mode]["completions/min_length"].append(
|
|
||||||
agg_completion_mask.float().min().item()
|
|
||||||
)
|
|
||||||
self._metrics[mode]["completions/max_length"].append(
|
|
||||||
agg_completion_mask.float().max().item()
|
|
||||||
)
|
|
||||||
|
|
||||||
# identify sequences that terminated with EOS and log their lengths
|
|
||||||
agg_terminated_with_eos = self.accelerator.gather_for_metrics(is_eos.any(dim=1))
|
|
||||||
term_completion_mask = agg_completion_mask[agg_terminated_with_eos]
|
|
||||||
clipped_completions_ratio = 1 - len(term_completion_mask) / len(
|
|
||||||
agg_completion_mask
|
|
||||||
)
|
|
||||||
self._metrics[mode]["completions/clipped_ratio"].append(
|
|
||||||
clipped_completions_ratio
|
|
||||||
)
|
|
||||||
if len(term_completion_mask) == 0:
|
|
||||||
# edge case where no completed sequences are found
|
|
||||||
term_completion_mask = torch.zeros(1, device=device)
|
|
||||||
self._metrics[mode]["completions/mean_terminated_length"].append(
|
|
||||||
term_completion_mask.float().mean().item()
|
|
||||||
)
|
|
||||||
self._metrics[mode]["completions/min_terminated_length"].append(
|
|
||||||
term_completion_mask.float().min().item()
|
|
||||||
)
|
|
||||||
self._metrics[mode]["completions/max_terminated_length"].append(
|
|
||||||
term_completion_mask.float().max().item()
|
|
||||||
)
|
|
||||||
|
|
||||||
# Calculate mean reward per function, but only for samples where the function was applied (non-NaN values)
|
|
||||||
for i, reward_func_name in enumerate(self.reward_func_names):
|
|
||||||
mean_rewards = torch.nanmean(rewards_per_func[:, i]).item()
|
|
||||||
self._metrics[mode][f"rewards/{reward_func_name}/mean"].append(mean_rewards)
|
|
||||||
std_rewards = nanstd(rewards_per_func[:, i]).item()
|
|
||||||
self._metrics[mode][f"rewards/{reward_func_name}/std"].append(std_rewards)
|
|
||||||
self._metrics[mode]["reward"].append(mean_grouped_rewards.mean().item())
|
|
||||||
self._metrics[mode]["reward_std"].append(std_grouped_rewards.mean().item())
|
|
||||||
|
|
||||||
# Log prompt and completion texts
|
|
||||||
self._textual_logs["prompt"].extend(gather_object(prompts_text))
|
|
||||||
self._textual_logs["completion"].extend(gather_object(completions_text))
|
|
||||||
for i, name in enumerate(self.reward_func_names):
|
|
||||||
self._textual_logs["rewards"][name].extend(rewards_per_func[:, i].tolist())
|
|
||||||
|
|
||||||
return {
|
|
||||||
"prompt_ids": prompt_ids,
|
|
||||||
"prompt_mask": prompt_mask,
|
|
||||||
"completion_ids": completion_ids,
|
|
||||||
"completion_mask": completion_mask,
|
|
||||||
"advantages": advantages,
|
|
||||||
"old_per_token_logps": old_per_token_logps,
|
|
||||||
"ref_per_token_logps": ref_per_token_logps,
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -6,4 +6,4 @@
|
|||||||
from .optimizer import OptimizerMixin
|
from .optimizer import OptimizerMixin
|
||||||
from .rng_state_loader import RngLoaderMixin
|
from .rng_state_loader import RngLoaderMixin
|
||||||
from .scheduler import SchedulerMixin
|
from .scheduler import SchedulerMixin
|
||||||
from .sequence_parallel import SequenceParallelMixin
|
from .sequence_parallel import SequenceParallelContextManager, SequenceParallelMixin
|
||||||
|
|||||||
@@ -1,13 +1,85 @@
|
|||||||
"""Module for Axolotl trainer sequence parallelism mixin"""
|
"""
|
||||||
|
Module for Axolotl trainer sequence parallelism mixin and training context manager
|
||||||
|
"""
|
||||||
|
|
||||||
|
import functools
|
||||||
|
import logging
|
||||||
|
|
||||||
|
import torch
|
||||||
import torch.distributed as dist
|
import torch.distributed as dist
|
||||||
from datasets import Dataset
|
from datasets import Dataset
|
||||||
|
from torch import nn
|
||||||
from torch.utils.data import DistributedSampler, Sampler
|
from torch.utils.data import DistributedSampler, Sampler
|
||||||
|
from torch.utils.hooks import RemovableHandle
|
||||||
|
|
||||||
from axolotl.monkeypatch.attention.ring_attn import (
|
from axolotl.monkeypatch.attention.ring_attn import (
|
||||||
|
RingAttnFunc,
|
||||||
get_ring_attn_group,
|
get_ring_attn_group,
|
||||||
|
update_ring_attn_params,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def apply_sequence_parallelism(
|
||||||
|
batch: dict[str, torch.Tensor],
|
||||||
|
local_rank: int,
|
||||||
|
local_world_size: int,
|
||||||
|
ring_attn_func: RingAttnFunc,
|
||||||
|
) -> dict[str, torch.Tensor]:
|
||||||
|
"""
|
||||||
|
Apply sequence parallelism slicing to a batch.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
batch: Batch dictionary (e.g., input_ids, attention_mask, etc.)
|
||||||
|
local_rank: Local rank in the sequence parallel group
|
||||||
|
local_world_size: World size of the sequence parallel group
|
||||||
|
ring_attn_func: The ring attention function to use
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Sliced batch dictionary.
|
||||||
|
"""
|
||||||
|
# Update ring attention params if needed
|
||||||
|
if batch.get("position_ids") is not None:
|
||||||
|
update_ring_attn_params(position_ids=batch["position_ids"])
|
||||||
|
|
||||||
|
# Slice batch for sequence parallel processing
|
||||||
|
total_seq_len = batch["input_ids"].size(1)
|
||||||
|
for key in batch:
|
||||||
|
if (
|
||||||
|
key in batch
|
||||||
|
and isinstance(batch[key], torch.Tensor)
|
||||||
|
and batch[key].dim() > 1
|
||||||
|
and batch[key].size(1) == total_seq_len
|
||||||
|
):
|
||||||
|
|
||||||
|
if ring_attn_func in [
|
||||||
|
RingAttnFunc.VARLEN_LLAMA3,
|
||||||
|
RingAttnFunc.BATCH_RING,
|
||||||
|
]:
|
||||||
|
# Split in sequential fashion and grab this rank's chunk
|
||||||
|
batch[key] = (
|
||||||
|
batch[key].chunk(local_world_size, dim=1)[local_rank].contiguous()
|
||||||
|
)
|
||||||
|
elif ring_attn_func is RingAttnFunc.BATCH_ZIGZAG:
|
||||||
|
chunks = batch[key].chunk(2 * local_world_size, dim=1)
|
||||||
|
|
||||||
|
# Take rank's chunk and opposing chunk for zigzag pattern
|
||||||
|
selected_chunks = [
|
||||||
|
chunks[local_rank],
|
||||||
|
chunks[2 * local_world_size - local_rank - 1],
|
||||||
|
]
|
||||||
|
batch[key] = torch.cat(selected_chunks, dim=1).contiguous()
|
||||||
|
elif ring_attn_func is RingAttnFunc.BATCH_STRIPE:
|
||||||
|
# Split into striped data and stack
|
||||||
|
tensor = torch.stack(
|
||||||
|
batch[key].split(local_world_size, dim=1),
|
||||||
|
dim=1,
|
||||||
|
).transpose(1, 2)
|
||||||
|
batch[key] = tensor[:, local_rank].contiguous()
|
||||||
|
|
||||||
|
return batch
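As a quick single-process illustration of the `BATCH_STRIPE` branch above (toy shapes and a made-up rank, no process group involved), this shows which positions a given rank keeps:

```python
import torch

local_world_size = 4
local_rank = 1
input_ids = torch.arange(8).unsqueeze(0)  # (batch=1, seq_len=8)

# Same stacking/transpose as the BATCH_STRIPE branch: rank r keeps every
# local_world_size-th position starting at offset r
striped = torch.stack(input_ids.split(local_world_size, dim=1), dim=1).transpose(1, 2)
shard = striped[:, local_rank].contiguous()
print(shard)  # tensor([[1, 5]])
```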
|
||||||
|
|
||||||
|
|
||||||
class SequenceParallelMixin:
|
class SequenceParallelMixin:
|
||||||
"""
|
"""
|
||||||
@@ -85,3 +157,157 @@ class SequenceParallelMixin:
|
|||||||
return self._create_sequence_parallel_sampler(
|
return self._create_sequence_parallel_sampler(
|
||||||
eval_dataset, shuffle=False, is_eval=True
|
eval_dataset, shuffle=False, is_eval=True
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class SequenceParallelContextManager:
|
||||||
|
"""
|
||||||
|
Context manager for sequence parallelism operations.
|
||||||
|
|
||||||
|
This class provides a context that will automatically apply sequence parallelism
|
||||||
|
during model forward passes using a pre-forward hook, and gather outputs from
|
||||||
|
across the sequence parallelism group using a post-forward hook.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
model: nn.Module,
|
||||||
|
sequence_parallel_degree: int,
|
||||||
|
ring_attn_func: RingAttnFunc,
|
||||||
|
):
|
||||||
|
self.model = model
|
||||||
|
self.sequence_parallel_degree = sequence_parallel_degree
|
||||||
|
self.ring_attn_func = ring_attn_func
|
||||||
|
self.process_group = get_ring_attn_group()
|
||||||
|
|
||||||
|
# Initialize sequence parallel group details
|
||||||
|
self.local_rank = dist.get_rank(self.process_group)
|
||||||
|
self.local_world_size = dist.get_world_size(self.process_group)
|
||||||
|
|
||||||
|
# Will store hook handles for removal
|
||||||
|
self.hook_handles: list[RemovableHandle] = []
|
||||||
|
|
||||||
|
# Create a partially applied version of the apply_sequence_parallelism function
|
||||||
|
# with pre-configured params
|
||||||
|
self.apply_sequence_parallelism = functools.partial(
|
||||||
|
apply_sequence_parallelism,
|
||||||
|
local_rank=self.local_rank,
|
||||||
|
local_world_size=self.local_world_size,
|
||||||
|
ring_attn_func=self.ring_attn_func,
|
||||||
|
)
|
||||||
|
|
||||||
|
def __enter__(self):
|
||||||
|
# Forward pre-hook to apply sequence parallelism
|
||||||
|
def sequence_parallel_pre_hook(_, args, kwargs):
|
||||||
|
# Apply sequence parallelism to kwargs
|
||||||
|
kwargs = self.apply_sequence_parallelism(batch=kwargs)
|
||||||
|
return args, kwargs
|
||||||
|
|
||||||
|
# Forward post-hook to gather outputs
|
||||||
|
def sequence_parallel_post_hook(_, __, output):
|
||||||
|
# Gather the sharded outputs
|
||||||
|
return self.gather_outputs(output)
|
||||||
|
|
||||||
|
# Register both hooks
|
||||||
|
self.hook_handles.append(
|
||||||
|
self.model.register_forward_pre_hook(
|
||||||
|
sequence_parallel_pre_hook, with_kwargs=True
|
||||||
|
)
|
||||||
|
)
|
||||||
|
self.hook_handles.append(
|
||||||
|
self.model.register_forward_hook(sequence_parallel_post_hook)
|
||||||
|
)
|
||||||
|
|
||||||
|
return self
|
||||||
|
|
||||||
|
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||||
|
# Remove all hooks
|
||||||
|
for handle in self.hook_handles:
|
||||||
|
handle.remove()
|
||||||
|
self.hook_handles = []
|
||||||
|
|
||||||
|
def gather_outputs(self, output):
|
||||||
|
"""Gather sharded outputs from all ranks and reconstruct the full tensor."""
|
||||||
|
# Handle different output formats (dict, tensor, etc.)
|
||||||
|
if isinstance(output, dict):
|
||||||
|
gathered_output = {}
|
||||||
|
for key, value in output.items():
|
||||||
|
if isinstance(value, torch.Tensor) and value.dim() > 1:
|
||||||
|
# Gather logits or other sequence-sharded tensors
|
||||||
|
gathered_value = self.gather_tensor(value)
|
||||||
|
gathered_output[key] = gathered_value
|
||||||
|
else:
|
||||||
|
gathered_value = value.clone()
|
||||||
|
dist.all_reduce(
|
||||||
|
gathered_value, op=dist.ReduceOp.SUM, group=self.process_group
|
||||||
|
)
|
||||||
|
gathered_output[key] = gathered_value
|
||||||
|
return gathered_output
|
||||||
|
if isinstance(output, torch.Tensor):
|
||||||
|
return self.gather_tensor(output)
|
||||||
|
|
||||||
|
return output
|
||||||
|
|
||||||
|
def gather_tensor(self, tensor):
|
||||||
|
"""Gather a sharded tensor from all ranks."""
|
||||||
|
# Prepare tensors for all_gather
|
||||||
|
world_size = self.local_world_size
|
||||||
|
|
||||||
|
# Create list to store tensors from all ranks
|
||||||
|
gathered_tensors = [torch.zeros_like(tensor) for _ in range(world_size)]
|
||||||
|
|
||||||
|
# All-gather operation
|
||||||
|
dist.all_gather(gathered_tensors, tensor, group=self.process_group)
|
||||||
|
|
||||||
|
# Concatenate along sequence dimension (typically dim=1)
|
||||||
|
if self.ring_attn_func in [RingAttnFunc.VARLEN_LLAMA3, RingAttnFunc.BATCH_RING]:
|
||||||
|
# Simple concatenation for standard sharding
|
||||||
|
return torch.cat(gathered_tensors, dim=1)
|
||||||
|
|
||||||
|
if self.ring_attn_func is RingAttnFunc.BATCH_ZIGZAG:
|
||||||
|
# Each rank has a pattern of (rank, world_size*2-rank-1)
|
||||||
|
reconstituted_tensors = [None] * (world_size * 2)
|
||||||
|
|
||||||
|
# First, split each gathered tensor into its two chunks
|
||||||
|
for rank, gathered_tensor in enumerate(gathered_tensors):
|
||||||
|
# Each tensor contains two chunks in the sequence dimension
|
||||||
|
chunk_size = gathered_tensor.size(1) // 2
|
||||||
|
chunk1, chunk2 = gathered_tensor.split(chunk_size, dim=1)
|
||||||
|
|
||||||
|
# Place chunks in their original positions
|
||||||
|
reconstituted_tensors[rank] = chunk1
|
||||||
|
reconstituted_tensors[world_size * 2 - rank - 1] = chunk2
|
||||||
|
|
||||||
|
# Concatenate the reconstituted tensors in the correct order
|
||||||
|
return torch.cat(reconstituted_tensors, dim=1)
|
||||||
|
|
||||||
|
# Otherwise, RingAttnFunc.BATCH_STRIPE
|
||||||
|
# In striping, each rank has every world_size-th slice
|
||||||
|
batch_size = tensor.size(0)
|
||||||
|
hidden_dim = tensor.size(-1)
|
||||||
|
|
||||||
|
# First, determine the full sequence length
|
||||||
|
total_seq_len = 0
|
||||||
|
for t in gathered_tensors:
|
||||||
|
total_seq_len += t.size(1)
|
||||||
|
|
||||||
|
# Create a tensor to hold the unstriped result
|
||||||
|
result = torch.zeros(
|
||||||
|
batch_size,
|
||||||
|
total_seq_len,
|
||||||
|
hidden_dim,
|
||||||
|
dtype=tensor.dtype,
|
||||||
|
device=tensor.device,
|
||||||
|
)
|
||||||
|
|
||||||
|
# For each rank's tensor, distribute its slices to the correct positions
|
||||||
|
for rank, gathered_tensor in enumerate(gathered_tensors):
|
||||||
|
# The rank's tensor contains every world_size-th slice
|
||||||
|
# starting from its rank position
|
||||||
|
seq_len = gathered_tensor.size(1)
|
||||||
|
for i in range(seq_len):
|
||||||
|
# Calculate the position in the full tensor
|
||||||
|
pos = i * world_size + rank
|
||||||
|
if pos < total_seq_len:
|
||||||
|
result[:, pos] = gathered_tensor[:, i]
|
||||||
|
|
||||||
|
return result
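For reference, here is a small single-process sketch of the `BATCH_ZIGZAG` round trip: it slices a toy sequence the way `apply_sequence_parallelism` does and reassembles it the way `gather_tensor` does. The world size and tensor shape are arbitrary toy values, and no process group is involved:

```python
import torch

world_size = 2
seq = torch.arange(8).reshape(1, 8, 1)  # (batch, seq_len, hidden)

# Shard: rank r takes chunks [r, 2 * world_size - r - 1] of 2 * world_size chunks
chunks = seq.chunk(2 * world_size, dim=1)
shards = [
    torch.cat([chunks[r], chunks[2 * world_size - r - 1]], dim=1)
    for r in range(world_size)
]
# shards[0] holds positions [0, 1, 6, 7]; shards[1] holds [2, 3, 4, 5]

# Reassemble, mirroring the BATCH_ZIGZAG branch of gather_tensor
reconstituted = [None] * (2 * world_size)
for rank, shard in enumerate(shards):
    first, second = shard.split(shard.size(1) // 2, dim=1)
    reconstituted[rank] = first
    reconstituted[2 * world_size - rank - 1] = second
restored = torch.cat(reconstituted, dim=1)
assert torch.equal(restored, seq)
```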
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ from PIL.Image import Resampling
|
|||||||
from transformers import TrainingArguments
|
from transformers import TrainingArguments
|
||||||
from trl import CPOConfig, KTOConfig, ORPOConfig, PRMConfig, RewardConfig
|
from trl import CPOConfig, KTOConfig, ORPOConfig, PRMConfig, RewardConfig
|
||||||
|
|
||||||
from axolotl.utils.schemas.enums import RingAttnFunc
|
from axolotl.monkeypatch.attention.ring_attn.patch import RingAttnFunc
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
|
|||||||
@@ -1,108 +0,0 @@
|
|||||||
# LLMCompressor Integration

Fine-tune sparsified models in Axolotl using Neural Magic's [LLMCompressor](https://github.com/vllm-project/llm-compressor).

This integration enables fine-tuning of models sparsified using LLMCompressor within the Axolotl training framework. By combining LLMCompressor's model compression capabilities with Axolotl's distributed training pipelines, users can efficiently fine-tune sparse models at scale.

It uses Axolotl’s plugin system to hook into the fine-tuning flows while maintaining sparsity throughout training.

---

## Requirements

- Axolotl with `llmcompressor` extras:

```bash
pip install "axolotl[llmcompressor]"
```

- Requires `llmcompressor >= 0.5.1`

This will install all necessary dependencies to fine-tune sparsified models using the integration.

---

## Usage

To enable sparse fine-tuning with this integration, include the plugin in your Axolotl config:

```yaml
plugins:
  - axolotl.integrations.llm_compressor.LLMCompressorPlugin

llmcompressor:
  recipe:
    finetuning_stage:
      finetuning_modifiers:
        ConstantPruningModifier:
          targets: [
            're:.*q_proj.weight',
            're:.*k_proj.weight',
            're:.*v_proj.weight',
            're:.*o_proj.weight',
            're:.*gate_proj.weight',
            're:.*up_proj.weight',
            're:.*down_proj.weight',
          ]
          start: 0
  save_compressed: true

# ... (other training arguments)
```

This plugin **does not apply pruning or sparsification itself** — it is intended for **fine-tuning models that have already been sparsified**.

Pre-sparsified checkpoints can be:

- Generated using [LLMCompressor](https://github.com/vllm-project/llm-compressor)
- Downloaded from [Neural Magic's Hugging Face page](https://huggingface.co/neuralmagic)
- Any custom LLM with compatible sparsity patterns that you've created yourself

To learn more about writing and customizing LLMCompressor recipes, refer to the official documentation:
[https://github.com/vllm-project/llm-compressor/blob/main/README.md](https://github.com/vllm-project/llm-compressor/blob/main/README.md)

### Storage Optimization with save_compressed

Setting `save_compressed: true` in your configuration enables saving models in a compressed format, which:

- Reduces disk space usage by approximately 40%
- Maintains compatibility with vLLM for accelerated inference
- Maintains compatibility with llmcompressor for further optimization (example: quantization)

This option is highly recommended when working with sparse models to maximize the benefits of model compression.
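If you want to check the savings on your own runs, one simple approach (placeholder paths, standard library only) is to compare the on-disk size of a checkpoint saved with and without `save_compressed`:

```python
from pathlib import Path

def dir_size_gib(path: str) -> float:
    """Total size of all files under `path`, in GiB."""
    return sum(f.stat().st_size for f in Path(path).rglob("*") if f.is_file()) / 1024**3

# Placeholder output directories from two otherwise-identical runs
plain = dir_size_gib("outputs/sparse-ft-uncompressed")
compressed = dir_size_gib("outputs/sparse-ft-compressed")
print(f"{plain:.2f} GiB -> {compressed:.2f} GiB ({100 * (1 - compressed / plain):.0f}% smaller)")
```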
### Example Config

See [`examples/llama-3/sparse-finetuning.yaml`](examples/llama-3/sparse-finetuning.yaml) for a complete example.

---

## Inference with vLLM

After fine-tuning your sparse model, you can leverage vLLM for efficient inference. You can also use LLMCompressor to apply additional quantization to your fine-tuned sparse model before inference for even greater performance benefits:

```python
from vllm import LLM, SamplingParams

prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
llm = LLM("path/to/your/sparse/model")
outputs = llm.generate(prompts, sampling_params)

for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
```

For more details on vLLM's capabilities and advanced configuration options, see the [official vLLM documentation](https://docs.vllm.ai/).

## Learn More

For details on available sparsity and quantization schemes, fine-tuning recipes, and usage examples, visit the official LLMCompressor repository:
[https://github.com/vllm-project/llm-compressor](https://github.com/vllm-project/llm-compressor)
@@ -1,5 +0,0 @@
|
|||||||
"""Integration entry point for the LLMCompressor plugin."""
|
|
||||||
|
|
||||||
from .plugin import LLMCompressorPlugin
|
|
||||||
|
|
||||||
__all__ = ["LLMCompressorPlugin"]
|
|
||||||
@@ -1,40 +0,0 @@
|
|||||||
"""
|
|
||||||
LLMCompressor and Sparse Finetuning config models.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Any
|
|
||||||
|
|
||||||
from pydantic import BaseModel, Field
|
|
||||||
from typing_extensions import Annotated
|
|
||||||
|
|
||||||
|
|
||||||
class CompressionArgs(BaseModel):
|
|
||||||
"""Sparse Finetuning config for LLMCompressor."""
|
|
||||||
|
|
||||||
# Typing for recipe is set to Any due to:
|
|
||||||
# https://github.com/vllm-project/llm-compressor/issues/1319
|
|
||||||
recipe: Annotated[
|
|
||||||
Any,
|
|
||||||
Field(
|
|
||||||
description="The recipe containing the compression algorithms and hyperparameters to apply."
|
|
||||||
),
|
|
||||||
]
|
|
||||||
|
|
||||||
save_compressed: Annotated[
|
|
||||||
bool,
|
|
||||||
Field(
|
|
||||||
default=False,
|
|
||||||
description="Whether to save the compressed model after training.",
|
|
||||||
),
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
class LLMCompressorArgs(BaseModel):
|
|
||||||
"""LLMCompressor configuration BaseModel."""
|
|
||||||
|
|
||||||
llmcompressor: Annotated[
|
|
||||||
CompressionArgs,
|
|
||||||
Field(
|
|
||||||
description="Arguments enabling compression pathways through the LLM Compressor plugins"
|
|
||||||
),
|
|
||||||
]
|
|
||||||
@@ -1,171 +0,0 @@
|
|||||||
"""
|
|
||||||
Sparse Finetuning plugin for Axolotl — enables handling of sparse neural networks
|
|
||||||
by maintaining masks for zero weights during training.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import logging
|
|
||||||
from functools import wraps
|
|
||||||
from typing import Any, Callable, Concatenate, ParamSpec, TypeVar
|
|
||||||
|
|
||||||
from llmcompressor import active_session, create_session
|
|
||||||
from llmcompressor.core import callbacks as session_callbacks
|
|
||||||
from llmcompressor.recipe import Recipe
|
|
||||||
from torch.nn import Module
|
|
||||||
from transformers.trainer import Trainer
|
|
||||||
from transformers.trainer_callback import TrainerCallback, TrainerControl, TrainerState
|
|
||||||
from transformers.training_args import TrainingArguments
|
|
||||||
|
|
||||||
from axolotl.integrations.base import BasePlugin
|
|
||||||
|
|
||||||
P = ParamSpec("P") # Params for generic function signatures
|
|
||||||
R = TypeVar("R") # Return type for generic function signatures
|
|
||||||
|
|
||||||
LOG = logging.getLogger("axolotl.integrations.llm_compressor")
|
|
||||||
|
|
||||||
|
|
||||||
class LLMCompressorCallbackHandler(TrainerCallback):
|
|
||||||
"""
|
|
||||||
Trainer callback for Sparse Finetuning.
|
|
||||||
Maintains sparsity patterns during training by applying masks after optimization steps,
|
|
||||||
ensuring zero-weight updates are canceled out.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, trainer: Trainer, recipe: Any):
|
|
||||||
"""
|
|
||||||
Initialize the Sparse Finetuning callback handler.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
trainer (Trainer): Huggingface Trainer instance.
|
|
||||||
recipe (Recipe | dict): Sparse finetuning recipe to apply.
|
|
||||||
"""
|
|
||||||
super().__init__()
|
|
||||||
self.trainer = trainer
|
|
||||||
self.recipe = (
|
|
||||||
Recipe.model_validate(recipe) if not isinstance(recipe, Recipe) else recipe
|
|
||||||
)
|
|
||||||
self.original_compute_loss = trainer.compute_loss
|
|
||||||
self.trainer.compute_loss = compute_loss_wrapper(self.trainer.compute_loss)
|
|
||||||
create_session()
|
|
||||||
|
|
||||||
def on_train_begin(
|
|
||||||
self,
|
|
||||||
args: TrainingArguments,
|
|
||||||
state: TrainerState,
|
|
||||||
control: TrainerControl,
|
|
||||||
**kwargs,
|
|
||||||
) -> None:
|
|
||||||
"""
|
|
||||||
Called at the beginning of training. Initializes the compression session.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
args (TrainingArguments): Training arguments.
|
|
||||||
state (TrainerState): Trainer state.
|
|
||||||
control (TrainerControl): Trainer control.
|
|
||||||
"""
|
|
||||||
super().on_train_begin(args, state, control, **kwargs)
|
|
||||||
self.trainer.accelerator.wait_for_everyone()
|
|
||||||
active_session().initialize(
|
|
||||||
model=self.trainer.model,
|
|
||||||
optimizer=self.trainer.optimizer,
|
|
||||||
start=state.epoch,
|
|
||||||
recipe=self.recipe,
|
|
||||||
)
|
|
||||||
self.trainer.accelerator.wait_for_everyone()
|
|
||||||
|
|
||||||
def on_step_begin(
|
|
||||||
self,
|
|
||||||
args: TrainingArguments,
|
|
||||||
state: TrainerState,
|
|
||||||
control: TrainerControl,
|
|
||||||
**kwargs,
|
|
||||||
) -> None:
|
|
||||||
"""
|
|
||||||
Called at the beginning of a training step. Triggers batch_start callback.
|
|
||||||
"""
|
|
||||||
super().on_step_begin(args, state, control, **kwargs)
|
|
||||||
session_callbacks.batch_start()
|
|
||||||
|
|
||||||
def on_step_end(
|
|
||||||
self,
|
|
||||||
args: TrainingArguments,
|
|
||||||
state: TrainerState,
|
|
||||||
control: TrainerControl,
|
|
||||||
**kwargs,
|
|
||||||
) -> None:
|
|
||||||
"""
|
|
||||||
Called at the end of a training step. Triggers optimizer and batch_end callbacks.
|
|
||||||
"""
|
|
||||||
super().on_step_end(args, state, control, **kwargs)
|
|
||||||
session_callbacks.optim_pre_step()
|
|
||||||
session_callbacks.optim_post_step()
|
|
||||||
session_callbacks.batch_end()
|
|
||||||
|
|
||||||
def on_train_end(
|
|
||||||
self,
|
|
||||||
args: TrainingArguments,
|
|
||||||
state: TrainerState,
|
|
||||||
control: TrainerControl,
|
|
||||||
**kwargs,
|
|
||||||
) -> None:
|
|
||||||
"""
|
|
||||||
Called at the end of training. Finalizes the compression session.
|
|
||||||
"""
|
|
||||||
super().on_train_end(args, state, control, **kwargs)
|
|
||||||
active_session().finalize()
|
|
||||||
self.trainer.compute_loss_func = self.original_compute_loss
|
|
||||||
|
|
||||||
|
|
||||||
class LLMCompressorPlugin(BasePlugin):
|
|
||||||
"""
|
|
||||||
Sparse Finetuning plugin for Axolotl integration.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def get_input_args(self) -> str:
|
|
||||||
"""
|
|
||||||
Returns the path to the plugin's argument definition.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
str: Dotted path to the LLMCompressorArgs class.
|
|
||||||
"""
|
|
||||||
return "axolotl.integrations.llm_compressor.args.LLMCompressorArgs"
|
|
||||||
|
|
||||||
def add_callbacks_post_trainer(self, cfg: Any, trainer: Trainer) -> list:
|
|
||||||
"""
|
|
||||||
Adds Sparse Finetuning callback to the Trainer instance.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
cfg (Any): Configuration object containing the sparse recipe.
|
|
||||||
trainer (Trainer): Huggingface Trainer instance.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
list: List containing the configured callback instances.
|
|
||||||
"""
|
|
||||||
LOG.info("Adding Sparse Finetuning callback to the trainer")
|
|
||||||
callback = LLMCompressorCallbackHandler(
|
|
||||||
trainer=trainer,
|
|
||||||
recipe=cfg.llmcompressor.recipe,
|
|
||||||
)
|
|
||||||
return [callback]
|
|
||||||
|
|
||||||
|
|
||||||
def compute_loss_wrapper(
|
|
||||||
compute_loss_func: Callable[Concatenate[Module, P], R],
|
|
||||||
) -> Callable[Concatenate[Module, P], R]:
|
|
||||||
"""
|
|
||||||
Wraps the loss computation function to trigger the loss_calculated callback.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
compute_loss_func (Callable): Original loss computation function.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Callable: Wrapped function that also invokes the loss_calculated callback.
|
|
||||||
"""
|
|
||||||
|
|
||||||
@wraps(compute_loss_func)
|
|
||||||
def compute_and_notify(model: Module, *args: P.args, **kwargs: P.kwargs) -> R:
|
|
||||||
loss = compute_loss_func(model, *args, **kwargs)
|
|
||||||
if active_session().lifecycle.initialized_ and model.training:
|
|
||||||
session_callbacks.loss_calculated(loss=loss)
|
|
||||||
return loss
|
|
||||||
|
|
||||||
return compute_and_notify
|
|
||||||
@@ -1,40 +0,0 @@
|
|||||||
"""Utilities for llmcompressor integration with axolotl."""
|
|
||||||
|
|
||||||
from typing import Union
|
|
||||||
|
|
||||||
from llmcompressor.transformers.sparsification.compressed_tensors_utils import (
|
|
||||||
modify_save_pretrained,
|
|
||||||
)
|
|
||||||
from transformers import PreTrainedModel, Trainer
|
|
||||||
|
|
||||||
|
|
||||||
def save_compressed_model(
|
|
||||||
model: PreTrainedModel,
|
|
||||||
output_dir: Union[str, bytes],
|
|
||||||
trainer: Trainer,
|
|
||||||
safe_serialization: bool = False,
|
|
||||||
save_compressed: bool = False,
|
|
||||||
) -> None:
|
|
||||||
"""
|
|
||||||
Synchronize processes, apply compression hooks, and save the model.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
model (PreTrainedModel): The model to be saved.
|
|
||||||
output_dir (str or bytes): Path where the model files will be written.
|
|
||||||
trainer (Trainer): Hugging Face Trainer for process synchronization.
|
|
||||||
safe_serialization (bool): Use safe serialization if True.
|
|
||||||
save_compressed (bool): Write compressed tensors if True.
|
|
||||||
"""
|
|
||||||
trainer.accelerator.wait_for_everyone()
|
|
||||||
|
|
||||||
# Only the main process writes the files
|
|
||||||
if not trainer.accelerator.is_main_process:
|
|
||||||
return
|
|
||||||
|
|
||||||
modify_save_pretrained(model)
|
|
||||||
model.save_pretrained(
|
|
||||||
output_dir,
|
|
||||||
safe_serialization=safe_serialization,
|
|
||||||
save_compressed=save_compressed,
|
|
||||||
skip_sparsity_compression_stats=not save_compressed,
|
|
||||||
)
|
|
||||||
@@ -4,6 +4,7 @@
|
|||||||
# flake8: noqa
|
# flake8: noqa
|
||||||
|
|
||||||
from .patch import (
|
from .patch import (
|
||||||
|
RingAttnFunc,
|
||||||
get_ring_attn_group,
|
get_ring_attn_group,
|
||||||
register_ring_attn,
|
register_ring_attn,
|
||||||
set_ring_attn_group,
|
set_ring_attn_group,
|
||||||
|
|||||||
@@ -16,7 +16,11 @@ import torch
|
|||||||
import torch.distributed as dist
|
import torch.distributed as dist
|
||||||
import transformers
|
import transformers
|
||||||
import transformers.modeling_flash_attention_utils
|
import transformers.modeling_flash_attention_utils
|
||||||
from ring_flash_attn import ring_flash_attn_func
|
from ring_flash_attn import (
|
||||||
|
ring_flash_attn_func,
|
||||||
|
stripe_flash_attn_func,
|
||||||
|
zigzag_ring_flash_attn_func,
|
||||||
|
)
|
||||||
from ring_flash_attn.adapters.hf_adapter import check_params
|
from ring_flash_attn.adapters.hf_adapter import check_params
|
||||||
from transformers.modeling_flash_attention_utils import (
|
from transformers.modeling_flash_attention_utils import (
|
||||||
_flash_supports_window_size,
|
_flash_supports_window_size,
|
||||||
@@ -24,12 +28,12 @@ from transformers.modeling_flash_attention_utils import (
|
|||||||
)
|
)
|
||||||
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
|
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
|
||||||
|
|
||||||
from axolotl.utils.schemas.enums import RingAttnFunc
|
from axolotl.monkeypatch.attention.ring_attn.patch import RingAttnFunc
|
||||||
|
|
||||||
RING_ATTN_FUNC_MAPPING = {
|
RING_ATTN_FUNC_MAPPING = {
|
||||||
RingAttnFunc.BATCH_RING: torch.compile(ring_flash_attn_func),
|
RingAttnFunc.BATCH_RING: ring_flash_attn_func,
|
||||||
# RingAttnFunc.BATCH_ZIGZAG: torch.compile(zigzag_ring_flash_attn_func),
|
RingAttnFunc.BATCH_ZIGZAG: zigzag_ring_flash_attn_func,
|
||||||
# RingAttnFunc.BATCH_STRIPE: torch.compile(stripe_flash_attn_func),
|
RingAttnFunc.BATCH_STRIPE: stripe_flash_attn_func,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -6,12 +6,13 @@ package, specifically the `hf_adapter.substitute_hf_flash_attn` function to patch
their sequence parallel version of Flash Attention 2.
"""

+from enum import Enum
+
import torch
import torch.distributed as dist
from accelerate.logging import get_logger

from axolotl.monkeypatch.utils import get_cu_seqlens_from_pos_ids
-from axolotl.utils.schemas.enums import RingAttnFunc

LOG = get_logger(__name__)

@@ -40,6 +41,17 @@ def set_ring_attn_group(ring_attn_group: dist.ProcessGroup | None):
    RING_ATTN_GROUP = ring_attn_group


+class RingAttnFunc(str, Enum):
+    """Enum class for supported `ring-flash-attn` implementations"""
+
+    # VARLEN_RING = "varlen_ring"
+    # VARLEN_ZIGZAG = "varlen_zigzag"
+    VARLEN_LLAMA3 = "varlen_llama3"
+    BATCH_RING = "batch_ring"
+    BATCH_ZIGZAG = "batch_zigzag"
+    BATCH_STRIPE = "batch_stripe"
+
+
def register_ring_attn(
    sequence_parallel_degree: int,
    heads_k_stride: int | None,
@@ -105,7 +117,11 @@ def register_ring_attn(
        substitute_hf_flash_attn(
            process_group=get_ring_attn_group(), heads_k_stride=heads_k_stride or 1
        )
-    elif ring_attn_func is RingAttnFunc.BATCH_RING:
+    elif ring_attn_func in [
+        RingAttnFunc.BATCH_RING,
+        RingAttnFunc.BATCH_ZIGZAG,
+        RingAttnFunc.BATCH_STRIPE,
+    ]:
        from axolotl.monkeypatch.attention.ring_attn.adapters.batch import (
            substitute_hf_flash_attn,
        )
@@ -7,7 +7,7 @@ import os
import signal
import sys
import weakref
-from contextlib import ExitStack
+from contextlib import nullcontext
from pathlib import Path
from typing import Any, Dict

@@ -27,13 +27,14 @@ from axolotl.contribs.lgpl import ( # pylint: disable = no-name-in-module
    fix_untrained_tokens,
)
from axolotl.core.trainer_builder import HFCausalTrainerBuilder, HFRLTrainerBuilder
+from axolotl.core.trainers.mixins.sequence_parallel import (
+    SequenceParallelContextManager,
+)
from axolotl.integrations.base import PluginManager
-from axolotl.utils.ctx_managers.sequence_parallel import SequenceParallelContextManager
from axolotl.utils.dict import DictDefault
from axolotl.utils.distributed import cleanup_distributed
from axolotl.utils.freeze import freeze_layers_except
from axolotl.utils.models import load_model, load_processor, load_tokenizer
-from axolotl.utils.schemas.enums import RLType
from axolotl.utils.trainer import setup_trainer

try:
@@ -106,7 +107,7 @@ def setup_reference_model(
        Reference model if needed for RL training, `None` otherwise.
    """
    model_ref = None
-    if cfg.rl and cfg.rl != RLType.ORPO:
+    if cfg.rl and cfg.rl != "orpo":
        if cfg.adapter and not cfg.rl_adapter_ref_model:
            # use built-in trl autounwrap
            LOG.debug("Passing model_ref: None to RL trainer")
@@ -187,32 +188,28 @@ def execute_training(
        trainer: The configured trainer object.
        resume_from_checkpoint: Path to checkpoint to resume from, if applicable.
    """
-    with ExitStack() as stack:
-        # Define the context managers to use
-        if cfg.flash_optimum:
-            stack.enter_context(
-                torch.backends.cuda.sdp_kernel(
-                    enable_flash=True,
-                    enable_math=True,
-                    enable_mem_efficient=True,
-                )
-            )
-
-        if cfg.sequence_parallel_degree > 1:
-            models = [trainer.model]
-            if hasattr(trainer, "ref_model"):
-                models.append(trainer.ref_model)
-
-            stack.enter_context(
-                SequenceParallelContextManager(
-                    models=models,
-                    sequence_parallel_degree=cfg.sequence_parallel_degree,
-                    gradient_accumulation_steps=cfg.gradient_accumulation_steps,
-                    ring_attn_func=cfg.ring_attn_func,
-                )
-            )
-
-        LOG.info("Starting trainer...")
+    # Define the context managers to use
+    flash_context = (
+        torch.backends.cuda.sdp_kernel(
+            enable_flash=True,
+            enable_math=True,
+            enable_mem_efficient=True,
+        )
+        if cfg.flash_optimum
+        else nullcontext()
+    )
+    sequence_parallel_context = (
+        SequenceParallelContextManager(
+            model=trainer.model,
+            sequence_parallel_degree=cfg.sequence_parallel_degree,
+            ring_attn_func=cfg.ring_attn_func,
+        )
+        if cfg.sequence_parallel_degree > 1
+        else nullcontext()
+    )
+
+    LOG.info("Starting trainer...")
+    with flash_context, sequence_parallel_context:
        trainer.train(resume_from_checkpoint=resume_from_checkpoint)
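A minimal sketch of the pattern used in the new version above: build each context manager up front and substitute contextlib.nullcontext() when the feature is disabled, so the `with` statement stays unconditional. The names below are illustrative only, not axolotl code.

from contextlib import nullcontext

maybe_ctx = some_context_manager() if feature_enabled else nullcontext()
with maybe_ctx:
    run_training_step()  # runs identically whether or not the feature is on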
@@ -297,23 +294,8 @@ def save_trained_model(
        trainer.model.save_pretrained(
            cfg.output_dir, safe_serialization=safe_serialization
        )

    model.save_pretrained(cfg.output_dir, safe_serialization=safe_serialization)

-    if hasattr(cfg, "llmcompressor") and cfg.llmcompressor:
-        # TODO: add integration support so this can be implemented completely within the plugin
-        from axolotl.integrations.llm_compressor.utils import (
-            save_compressed_model,
-        )
-
-        save_compressed_model(
-            model=model,
-            output_dir=cfg.output_dir,
-            trainer=trainer,
-            safe_serialization=safe_serialization,
-            save_compressed=cfg.llmcompressor.save_compressed,
-        )
-
-
def create_model_card(cfg: DictDefault, trainer: Trainer):
    """
@@ -1,6 +0,0 @@
-"""Init for context manager submodule"""
-
-# pylint: disable=unused-import
-# flake8: noqa
-
-from .sequence_parallel import SequenceParallelContextManager
@@ -1,335 +0,0 @@
"""Module for Axolotl trainer sequence parallelism manager and utilities"""

import functools

import torch
import torch.distributed as dist
from torch import nn
from torch.utils.hooks import RemovableHandle
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.utils import ModelOutput

from axolotl.monkeypatch.attention.ring_attn.patch import (
    get_ring_attn_group,
    update_ring_attn_params,
)
from axolotl.utils.schemas.enums import RingAttnFunc


# TODO(djsaunde): implement zigzag, stripe patterns here (and elsewhere) in this
# module. Currently, we just focus on batch ring and varlen llama3 for simplicity.
def apply_sequence_parallelism(
    batch: dict[str, torch.Tensor],
    local_rank: int,
    local_world_size: int,
    gradient_accumulation_steps: int,
    ring_attn_func: RingAttnFunc,  # pylint: disable=unused-argument
) -> tuple[dict[str, torch.Tensor], int, int]:
    """
    Apply sequence parallelism slicing to a batch.

    Special handling is implemented for integer logits_to_keep, which indicates
    to only keep the last N tokens in the sequence during generation.

    Args:
        batch: Batch dictionary (e.g., input_ids, attention_mask, etc.).
        local_rank: Local rank in the sequence parallel group.
        local_world_size: World size of the sequence parallel group.
        gradient_accumulation_steps: Number of steps to accumulate gradients over.
        ring_attn_func: Which ring attention function to use. Currently unused, but
            related to above TODO.

    Returns:
        tuple of:
            - Batch dictionary with sliced tensors.
            - The original sequence length before padding.
            - The number of padding tokens added.
    """
    original_seq_len = batch["input_ids"].size(1)

    # Update ring attention params if needed
    if batch.get("position_ids") is not None:
        update_ring_attn_params(position_ids=batch["position_ids"])
    else:
        # If position_ids aren't already in the batch, create them
        batch["position_ids"] = torch.arange(
            0,
            original_seq_len,
            dtype=torch.long,
            device=batch["input_ids"].device,
        ).expand(batch["input_ids"].size(0), -1)

    if "logits_to_keep" in batch and isinstance(batch["logits_to_keep"], int):
        logits_to_keep = batch["logits_to_keep"]

        # Calculate which positions in the full sequence contain the last N tokens
        start_position = max(0, original_seq_len - logits_to_keep)
        chunk_size = original_seq_len // local_world_size
        rank_start = local_rank * chunk_size
        rank_end = rank_start + chunk_size

        # Create a boolean mask tensor for this rank's chunk
        mask = torch.zeros(
            chunk_size,
            dtype=torch.bool,
            device=batch["input_ids"].device,
        )

        if rank_end > start_position:
            # Calculate how many of the last N tokens fall within this rank's range
            tokens_in_rank = min(rank_end, original_seq_len) - max(
                rank_start, start_position
            )

            # Calculate where these tokens start in the local chunk
            local_start_idx = max(0, start_position - rank_start)

            # Set the appropriate positions in the mask to True
            mask[local_start_idx : local_start_idx + tokens_in_rank] = True

        # Replace the integer with the boolean mask
        batch["logits_to_keep"] = mask

    # Add padding to make sequence length divisible by local_world_size
    total_seq_len = original_seq_len
    pad_len = 0
    divisor = min(local_world_size, 64)
    if total_seq_len % divisor != 0:
        pad_len = divisor - (total_seq_len % divisor)

        # Apply padding to all relevant tensors
        for key in batch:
            if (
                isinstance(batch[key], torch.Tensor)
                and batch[key].dim() > 1
                and batch[key].size(1) == total_seq_len
            ):
                # Create padding tensor
                pad_value = -100 if key == "labels" else 0
                padding = torch.full(
                    (batch[key].size(0), pad_len, *batch[key].shape[2:]),
                    pad_value,
                    dtype=batch[key].dtype,
                    device=batch[key].device,
                )

                # Concatenate padding to the right side of the tensor
                batch[key] = torch.cat([batch[key], padding], dim=1)
            if key == "logits_to_keep":
                # Create padding tensor
                padding = torch.ones(
                    1,
                    dtype=batch[key].dtype,
                    device=batch[key].device,
                )

                # Concatenate padding to the right side of the tensor
                batch[key] = torch.cat([batch[key], padding], dim=0)

    # Update the total sequence length after padding
    total_seq_len = batch["input_ids"].size(1)

    # Slice batch for sequence parallel
    for key in batch:
        if not isinstance(batch[key], torch.Tensor) or batch[key].dim() <= 1:
            continue

        # Split in sequential fashion and grab this rank's chunk
        if batch[key].size(1) == total_seq_len:
            batch[key] = (
                batch[key].chunk(local_world_size, dim=1)[local_rank].contiguous()
            )
        elif key == "logits_to_keep":
            batch[key] = (
                batch[key].chunk(local_world_size, dim=0)[local_rank].contiguous()
            )

    # Handle num_items_in_batch
    if "num_items_in_batch" in batch:
        # Approximation; this needed since num_items_in_batch may be counted across
        # all samples in a gradient accumulated batch, not on a per-step basis.
        batch["num_items_in_batch"] = (
            batch["labels"] != -100
        ).sum() * gradient_accumulation_steps

    return batch, original_seq_len, pad_len


class SequenceParallelContextManager:
    """Context manager for sequence parallelism operations.

    This class provides a context that will automatically apply sequence parallelism
    during model forward passes using a pre-forward hook, and gather outputs from
    across the sequence parallelism group using a post-forward hook.

    Args:
        models: List of models to apply sequence parallelism to pre- and post- forward
            hooks.
        sequence_parallel_degree: Number of processes to split sequences over.
        gradient_accumulation_steps: Number of steps to accumulate gradients over.
        ring_attn_func: Which ring attention function to use. Currently unused.
    """

    def __init__(
        self,
        models: list[nn.Module],
        sequence_parallel_degree: int,
        gradient_accumulation_steps: int,
        ring_attn_func: RingAttnFunc,
    ):
        self.models = models
        self.sequence_parallel_degree = sequence_parallel_degree
        self.gradient_accumulation_steps = gradient_accumulation_steps
        self.ring_attn_func = ring_attn_func
        self.process_group = get_ring_attn_group()

        # Initialize sequence parallel group details
        self.local_rank = dist.get_rank(self.process_group)
        self.local_world_size = dist.get_world_size(self.process_group)

        # Will store hook handles for removal
        self.hook_handles: list[RemovableHandle] = []

        # Store original sequence length and padding information
        self.original_seq_len = 0
        self.pad_len = 0

        # Create a partially applied version of the apply_sequence_parallelism function
        self.apply_sequence_parallelism = functools.partial(
            apply_sequence_parallelism,
            local_rank=self.local_rank,
            local_world_size=self.local_world_size,
            gradient_accumulation_steps=self.gradient_accumulation_steps,
            ring_attn_func=self.ring_attn_func,
        )

    def __enter__(self):
        # Forward pre-hook to apply sequence parallelism
        def sequence_parallel_pre_hook(_, args, kwargs):
            # Apply sequence parallelism to kwargs and get original sequence length and padding info
            kwargs, self.original_seq_len, self.pad_len = (
                self.apply_sequence_parallelism(batch=kwargs)
            )

            return args, kwargs

        # Forward post-hook to gather outputs
        def sequence_parallel_post_hook(_, __, output: ModelOutput) -> ModelOutput:
            # Gather the sharded outputs
            output = self.gather_outputs(output)

            # Remove padding if it was added
            if self.pad_len > 0:
                for key, value in output.items():
                    if isinstance(value, torch.Tensor) and value.dim() > 1:
                        if value.size(1) == self.original_seq_len + self.pad_len:
                            # Slice to remove padding
                            output[key] = value[:, : self.original_seq_len].contiguous()

            return output

        # Register both hooks
        for model in self.models:
            self.hook_handles.append(
                model.register_forward_pre_hook(
                    sequence_parallel_pre_hook, with_kwargs=True
                )
            )
            self.hook_handles.append(
                model.register_forward_hook(sequence_parallel_post_hook)
            )

        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Remove all hooks
        for handle in self.hook_handles:
            handle.remove()
        self.hook_handles = []

    def gather_outputs(self, output: CausalLMOutputWithPast) -> CausalLMOutputWithPast:
        """Gather sharded outputs from all ranks and reconstruct the full tensor."""
        for key, value in output.items():
            if isinstance(value, torch.Tensor) and value.dim() > 1:
                output[key] = AllGatherWithGrad.apply(value, self.process_group)

        return output


class AllGatherWithGrad(torch.autograd.Function):
    """Custom autograd function for all-gather to preserve gradients."""

    @staticmethod
    def forward(
        ctx: torch.autograd.function.FunctionCtx,
        input_tensor: torch.Tensor,
        group: dist.ProcessGroup,
    ) -> torch.Tensor:
        """
        Forward pass of all-gather of data with sequence dimension.

        Args:
            ctx: `torch.autograd` function context.
            input_tensor: Tensor from model output with sequence dimension.
            group: `torch.distributed` process group.

        Returns:
            Tensor from gathering the `input_tensor` from across the process group and
            concatenating along the sequence dimension.
        """
        ctx.group = group
        ctx.rank = dist.get_rank(group)
        world_size = dist.get_world_size(group)

        # Gather shape metadata
        local_shape = torch.tensor(list(input_tensor.shape), device=input_tensor.device)
        all_shapes = [torch.zeros_like(local_shape) for _ in range(world_size)]
        dist.all_gather(all_shapes, local_shape, group=group)

        # Store sequence lengths for backward pass
        seq_lens = [int(shape[1].item()) for shape in all_shapes]
        ctx.seq_lens = seq_lens

        # Perform all_gather operation
        gathered = [
            torch.zeros(
                tuple(shape.tolist()),
                dtype=input_tensor.dtype,
                device=input_tensor.device,
            )
            for shape in all_shapes
        ]
        dist.all_gather(gathered, input_tensor, group=group)

        # Concatenate tensors along sequence dimension
        result = torch.cat(gathered, dim=1)

        return result

    @staticmethod
    def backward(
        ctx: torch.autograd.function.FunctionCtx, grad_output: torch.Tensor
    ) -> tuple[torch.Tensor, None]:
        """
        Backward pass for all-gather operation.

        Extracts the gradient slice corresponding to this rank's original input
        from the full gradient tensor.

        Args:
            ctx: `torch.autograd` function context.
            grad_output: Gradient from subsequent layers with respect to the
                concatenated output tensor.

        Returns:
            Tuple containing the gradient slice for this rank's input tensor and `None`
            for the process group parameter which doesn't require gradients.
        """
        rank = ctx.rank
        seq_lens = ctx.seq_lens

        # Extract gradient for this rank's chunk
        offset = sum(seq_lens[:rank])
        grad_slice = grad_output[:, offset : offset + seq_lens[rank]].contiguous()

        return grad_slice, None
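A hedged usage sketch of the class deleted above (constructor arguments follow its __init__ signature); it assumes the ring attention process group has already been registered, and is shown only to illustrate how the pre- and post-forward hooks were meant to wrap a training forward pass.

with SequenceParallelContextManager(
    models=[model],
    sequence_parallel_degree=2,
    gradient_accumulation_steps=1,
    ring_attn_func=RingAttnFunc.BATCH_RING,
):
    outputs = model(**batch)  # batch is sliced before forward, outputs gathered after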
@@ -18,9 +18,8 @@ from axolotl.utils.data.utils import deduplicate_and_log_datasets, md5
from axolotl.utils.dict import DictDefault
from axolotl.utils.distributed import is_main_process, zero_first
from axolotl.utils.models import load_tokenizer
-from axolotl.utils.schemas.enums import RLType

-LOG = logging.getLogger(__name__)
+LOG = logging.getLogger("axolotl")


def _get_path(ds_hash, cfg):
@@ -81,7 +80,7 @@ def map_dataset(cfg, data_set, ds_transform_fn, tokenizer, **map_kwargs):
def drop_long_rl_seq(
    sample, rl, tokenizer, sequence_len  # pylint: disable=invalid-name
):
-    if rl in (RLType.DPO, RLType.IPO, RLType.ORPO, RLType.SIMPO):
+    if rl in ("dpo", "ipo", "orpo", "simpo"):
        if not (
            sample.get("prompt") and sample.get("chosen") and sample.get("rejected")
        ):
@@ -101,7 +100,7 @@ def drop_long_rl_seq(
            len_prompt + len_rejected
        ) <= sequence_len

-    if rl is RLType.KTO:
+    if rl == "kto":
        if not (sample.get("prompt") and sample.get("completion")):
            raise ValueError("Prompt and completion keys are required for KTO datasets")

@@ -115,7 +114,7 @@ def drop_long_rl_seq(

        return (len_prompt + len_completion) <= sequence_len

-    if rl is RLType.GRPO:
+    if rl == "grpo":
        return True

    raise ValueError("Unknown RL type")
@@ -138,9 +137,9 @@ def load_prepare_preference_datasets(cfg):
            if _type:
                if isinstance(_type, DictDefault):
                    _type = "user_defined.default"
-                if _cfg.rl is RLType.ORPO:
+                if _cfg.rl == "orpo":
                    ds_transform_fn = load_orpo(_type, _cfg, dataset_idx=i)
-                elif _cfg.rl is RLType.KTO:
+                elif _cfg.rl == "kto":
                    ds_transform_fn = load_kto(_type, _cfg, dataset_idx=i)
                else:
                    ds_transform_fn = load_dpo(_type, _cfg, dataset_idx=i)
@@ -151,7 +150,7 @@ def load_prepare_preference_datasets(cfg):
                split_datasets[i] = map_dataset(
                    cfg, data_set, ds_transform_fn, tokenizer, **map_kwargs
                )
-            elif _cfg.rl is RLType.KTO:
+            elif _cfg.rl == "kto":
                ds_transform_fn = load_kto(_type, _cfg, dataset_idx=i)
                map_kwargs = {}
                if isinstance(ds_transform_fn, tuple):
@@ -186,7 +185,7 @@ def load_prepare_preference_datasets(cfg):
        )

        combined_datasets = concatenate_datasets(split_datasets)
-        combined_datasets = combined_datasets.shuffle(seed=cfg.seed or 42)
+        combined_datasets = combined_datasets.shuffle(seed=cfg.seed)

        return combined_datasets
@@ -206,8 +205,6 @@ def load_prepare_preference_datasets(cfg):
        eval_dataset = load_split(cfg.test_datasets, cfg)
    if not eval_dataset:
        if cfg.val_set_size:
-            seed = cfg.seed if cfg.seed is not None else 42
-
            # ensure we end up with the same fingerprint by doing rank0 first and being able to cache
            to_hash_train = (
                train_dataset._fingerprint  # pylint: disable=protected-access
@@ -216,7 +213,7 @@ def load_prepare_preference_datasets(cfg):
                + "|"
                + "train"
                + "|"
-                + str(seed)
+                + str(cfg.seed or 42)
            )
            to_hash_test = (
                train_dataset._fingerprint  # pylint: disable=protected-access
@@ -225,13 +222,13 @@ def load_prepare_preference_datasets(cfg):
                + "|"
                + "test"
                + "|"
-                + str(seed)
+                + str(cfg.seed or 42)
            )
            train_fingerprint = md5(to_hash_train)
            test_fingerprint = md5(to_hash_test)
            ds_w_test_split = train_dataset.train_test_split(
                test_size=cfg.val_set_size,
-                seed=seed,
+                seed=cfg.seed,
                shuffle=False,
                train_new_fingerprint=train_fingerprint,
                test_new_fingerprint=test_fingerprint,
@@ -148,7 +148,7 @@ def prepare_dataset(cfg, tokenizer, processor=None, preprocess_iterable=None):
            ds_wrapper_partial,
            max_tokens=cfg.sequence_len,
            batch_size=cfg.micro_batch_size,
-            seed=cfg.seed if cfg.seed is not None else 42,
+            seed=cfg.seed or 42,
            buffer_size=cfg.pretrain_multipack_buffer_size or 10_000,
        )
        # https://discuss.huggingface.co/t/how-to-use-huggingface-trainer-streaming-datasets-without-wrapping-it-with-torchdatas-iterablewrapper/25230
@@ -416,8 +416,6 @@ def load_prepare_datasets(
    )

    if split == "train" and val_set_size:
-        seed = cfg.seed if cfg.seed is not None else 42
-
        # ensure we end up with the same fingerprint by doing rank0 first and being able to cache
        to_hash_train = (
            dataset._fingerprint  # pylint: disable=protected-access
@@ -426,7 +424,7 @@ def load_prepare_datasets(
            + "|"
            + "train"
            + "|"
-            + str(seed)
+            + str(cfg.seed or 42)
        )
        to_hash_test = (
            dataset._fingerprint  # pylint: disable=protected-access
@@ -435,7 +433,7 @@ def load_prepare_datasets(
            + "|"
            + "test"
            + "|"
-            + str(seed)
+            + str(cfg.seed or 42)
        )
        train_fingerprint = md5(to_hash_train)
        test_fingerprint = md5(to_hash_test)
@@ -444,7 +442,7 @@ def load_prepare_datasets(
        dataset = dataset.train_test_split(
            test_size=val_set_size,
            shuffle=False,
-            seed=seed,
+            seed=cfg.seed or 42,
            train_new_fingerprint=train_fingerprint,
            test_new_fingerprint=test_fingerprint,
        )
@@ -5,8 +5,11 @@ from functools import partial

from packaging import version

-from axolotl.utils.gradient_checkpointing.unsloth import (
-    Unsloth_Offloaded_Gradient_Checkpointer,
+from axolotl.utils.gradient_checkpointing.offload_cpu import (
+    CPU_Offloaded_Gradient_Checkpointer,
+)
+from axolotl.utils.gradient_checkpointing.offload_disk import (
+    Disco,
)

transformers_version = version.parse(importlib.metadata.version("transformers"))
@@ -26,12 +29,31 @@ def hf_grad_checkpoint_offload_wrapper(
    decoder_layer, *args, use_reentrant=None
):  # pylint: disable=unused-argument
    if uses_gc_layers(decoder_layer):
-        return Unsloth_Offloaded_Gradient_Checkpointer.apply(
+        return CPU_Offloaded_Gradient_Checkpointer.apply(
            decoder_layer,
            *args,
        )

-    return Unsloth_Offloaded_Gradient_Checkpointer.apply(
+    return CPU_Offloaded_Gradient_Checkpointer.apply(
+        (
+            decoder_layer.func.__self__
+            if isinstance(decoder_layer, partial)
+            else decoder_layer.__self__
+        ),
+        *args,
+    )
+
+
+def hf_grad_checkpoint_disk_offload_wrapper(
+    decoder_layer, *args, use_reentrant=None
+):  # pylint: disable=unused-argument
+    if uses_gc_layers(decoder_layer):
+        return Disco.apply(
+            decoder_layer,
+            *args,
+        )
+
+    return Disco.apply(
        (
            decoder_layer.func.__self__
            if isinstance(decoder_layer, partial)
@@ -1,4 +1,4 @@
-"""Unsloth checkpointing"""
+"""CPU offloaded checkpointing"""

# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
@@ -26,7 +26,7 @@ else:
    torch_cuda_amp_custom_bwd = torch.amp.custom_bwd(device_type="cuda")


-class Unsloth_Offloaded_Gradient_Checkpointer(  # pylint: disable=invalid-name
+class CPU_Offloaded_Gradient_Checkpointer(  # pylint: disable=invalid-name
    torch.autograd.Function
):
    """
src/axolotl/utils/gradient_checkpointing/offload_disk.py (new file, 531 lines)
@@ -0,0 +1,531 @@
"""
DISCO - DIsk-based Storage and Checkpointing with Optimized prefetching
"""

# Copyright 2025 Axolotl AI. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import atexit
import concurrent.futures
import logging
import os
import queue
import shutil
import tempfile
import threading
import time
import uuid
from collections import deque
from concurrent.futures import Future
from typing import Dict

import torch

torch_cuda_amp_custom_fwd = torch.amp.custom_fwd(device_type="cuda")
torch_cuda_amp_custom_bwd = torch.amp.custom_bwd(device_type="cuda")

# Setup logger
logger = logging.getLogger(__name__)


class DiskOffloadManager:
    """
    Manages offloaded tensors and handles prefetching in a separate thread.
    Includes synchronization to prevent race conditions.
    """

    def __init__(
        self,
        prefetch_size: int = 3,
        prefetch_to_gpu: bool = True,
        save_workers: int = 4,
    ):
        """
        Args:
            prefetch_size: Maximum number of tensors to prefetch in the background.
            prefetch_to_gpu: Whether to prefetch tensors directly to GPU memory.
            save_workers: Maximum number of concurrent save operations.
        """
        self.temp_dir = tempfile.mkdtemp(prefix="disco_")

        # Track tensor paths and their status
        self.tensor_paths: deque = deque()  # Ordered history of tensor paths (LIFO)
        self.file_locks: Dict[str, threading.Lock] = (
            {}
        )  # Maps file_path -> threading.Lock()
        # Maps file_path -> status ("saving", "ready", "prefetching", "loaded", "deleted")
        self.file_status: Dict[str, str] = {}

        self.max_prefetch = prefetch_size
        self.prefetch_to_gpu = prefetch_to_gpu

        # Thread synchronization
        self.manager_lock = threading.RLock()  # Used for thread-safe operations

        # Prefetch queue and cache
        self.prefetch_queue: queue.Queue = queue.Queue()
        self.prefetch_cache: Dict[str, torch.Tensor] = {}  # Maps file_path -> tensor

        # Save queue and thread pool
        self.save_queue: queue.Queue = queue.Queue()
        self.save_pool = concurrent.futures.ThreadPoolExecutor(max_workers=save_workers)
        self.save_futures: Dict[str, Future] = {}
        self.save_semaphore = threading.Semaphore(
            save_workers * 2
        )  # Limit concurrent save operations

        # Start prefetch worker thread
        self.stop_event = threading.Event()
        # start multiple threads for prefetching
        self.prefetch_worker_count = 2
        self.prefetch_workers = []
        for _ in range(self.prefetch_worker_count):
            worker = threading.Thread(target=self._prefetch_worker, daemon=True)
            worker.start()
            self.prefetch_workers.append(worker)

        # Start save worker thread
        self.save_worker = threading.Thread(target=self._save_worker, daemon=True)
        self.save_worker.start()
        self.idx = 0

        atexit.register(self.cleanup)

    def _save_worker(self):
        """Background thread that processes the save queue"""
        while not self.stop_event.is_set():
            try:
                save_item = self.save_queue.get(timeout=0.5)
                if save_item is None:
                    continue

                tensor, file_path = save_item

                # Submit the save task to the thread pool
                future = self.save_pool.submit(
                    self._save_tensor_to_disk, tensor, file_path
                )
                with self.manager_lock:
                    self.save_futures[file_path] = future

                self.save_queue.task_done()

            except queue.Empty:
                time.sleep(0.01)  # Small sleep to prevent CPU spinning
                continue

    def _save_tensor_to_disk(self, tensor: torch.Tensor, file_path: str):
        """Actually save the tensor to disk"""
        try:
            # Save tensor to disk
            cpu_tensor = tensor.detach().cpu()
            torch.save(cpu_tensor, file_path)
            del cpu_tensor

            with self.manager_lock:
                # Mark file as ready
                self.file_status[file_path] = "ready"

            # Release semaphore
            self.save_semaphore.release()

            return True
        except FileNotFoundError as e:
            logger.error(f"Error saving tensor to {file_path}: {e}")
            with self.manager_lock:
                self.file_status[file_path] = "error"

            # Release semaphore
            self.save_semaphore.release()

            return False

    def _prefetch_worker(self):
        """Background thread that loads tensors from disk ahead of time"""
        while not self.stop_event.is_set():
            try:
                file_path = self.prefetch_queue.get(timeout=0.5)
                if file_path is None:
                    continue

                # Check if file is available and not already in cache
                with self.manager_lock:
                    if (
                        file_path not in self.file_status
                        or self.file_status[file_path] == "deleted"
                    ):
                        self.prefetch_queue.task_done()
                    if file_path in self.prefetch_cache:
                        self.prefetch_queue.task_done()
                        continue

                    # If file is still being saved, wait for it
                    if (
                        self.file_status[file_path] == "saving"
                        and file_path in self.save_futures
                    ):
                        # Re-queue this prefetch request with a little delay
                        self.prefetch_queue.task_done()
                        time.sleep(0.1)
                        self.prefetch_queue.put(file_path)
                        continue

                    # Mark file as being prefetched
                    self.file_status[file_path] = "prefetching"

                # Load tensor from disk and store in cache
                try:
                    if os.path.exists(file_path):
                        if self.prefetch_to_gpu:
                            tensor = torch.load(
                                file_path,
                                map_location=torch.device("cuda"),
                                weights_only=True,
                            )
                        else:
                            tensor = torch.load(file_path, weights_only=True)

                        with self.manager_lock:
                            self.prefetch_cache[file_path] = tensor
                            self.file_status[file_path] = "ready"
                    else:
                        with self.manager_lock:
                            if self.file_status.get(file_path) != "deleted":
                                logger.warning(
                                    f"Prefetch error: File not found {file_path}"
                                )
                                self.file_status[file_path] = "missing"

                except FileNotFoundError as e:
                    with self.manager_lock:
                        if self.file_status.get(file_path) != "deleted":
                            logger.warning(f"Prefetch error for {file_path}: {e}")
                            self.file_status[file_path] = "error"

                self.prefetch_queue.task_done()

            except queue.Empty:
                time.sleep(0.01)  # Small sleep to prevent CPU spinning
                continue

    def save_tensor(self, tensor: torch.Tensor):
        """Save tensor to disk asynchronously and return file path with thread-safe operations"""
        # Generate unique file path
        self.idx += 1
        file_path: str = os.path.join(
            self.temp_dir, f"{self.idx:06d}-{uuid.uuid4()}.pt"
        )

        with self.manager_lock:
            # Mark file as being saved
            self.file_locks[file_path] = threading.Lock()
            self.file_status[file_path] = "saving"
            # Add to history
            self.tensor_paths.append(file_path)

        # Acquire semaphore to limit concurrent save operations
        self.save_semaphore.acquire()  # pylint: disable=consider-using-with
        # Queue tensor for saving in background
        self.save_queue.put((tensor.detach(), file_path))

        return file_path

    def wait_for_save(self, file_path, timeout=None) -> None:
        """Wait for a tensor to be saved to disk"""
        start_time = time.time()
        while timeout is None or time.time() - start_time < timeout:
            with self.manager_lock:
                if self.file_status.get(file_path) == "ready":
                    return
                if self.file_status.get(file_path) in ["error", "missing", "deleted"]:
                    return

                if file_path in self.save_futures:
                    future = self.save_futures[file_path]
                    if future.done():
                        return

            # Small sleep to prevent CPU spinning
            time.sleep(0.01)

        # Timeout
        logger.warning(f"Timeout waiting for tensor to be saved: {file_path}")
        return

    def load_tensor(self, file_path, target_device="cuda"):
        """Load tensor from disk or prefetch cache with proper synchronization"""
        # Wait for tensor to be saved if it's still in progress
        self.wait_for_save(file_path)

        tensor = None

        # Try to get from cache first
        with self.manager_lock:
            # Check if tensor is already in cache
            if file_path in self.prefetch_cache:
                tensor = self.prefetch_cache[file_path]
                del self.prefetch_cache[file_path]
                self.file_status[file_path] = "loaded"

        if tensor is not None:
            # Ensure tensor is on correct device
            if target_device != "cpu" and tensor.device.type == "cpu":
                tensor = tensor.to(target_device, non_blocking=True)
            return tensor

        # If not in cache, load directly from disk
        try:
            if not os.path.exists(file_path):
                logger.error(f"File not found for loading: {file_path}")
                raise FileNotFoundError(f"File not found: {file_path}")

            tensor = torch.load(file_path, weights_only=True)

            with self.manager_lock:
                self.file_status[file_path] = "loaded"

            if target_device != "cpu":
                tensor = tensor.to(target_device, non_blocking=True)

            return tensor

        except Exception as e:
            logger.error(f"Error loading tensor from {file_path}: {e}")
            raise

    def _safe_delete_file(self, file_path):
        """Safely delete a file with proper synchronization"""
        with self.manager_lock:
            # Make sure any save operation is completed
            if file_path in self.save_futures:
                future = self.save_futures[file_path]
                try:
                    if not future.done():
                        future.cancel()
                    del self.save_futures[file_path]
                except FileNotFoundError as e:
                    logger.warning(
                        f"Error canceling save operation for {file_path}: {e}"
                    )

            # Only delete if file exists and is not being prefetched
            status = self.file_status.get(file_path)
            if status in ["ready", "loaded", "error", "missing"]:
                try:
                    if os.path.exists(file_path):
                        os.remove(file_path)
                    self.file_status[file_path] = "deleted"
                    return True
                except FileNotFoundError as e:
                    logger.warning(f"Error deleting file {file_path}: {e}")
        return False

    def trigger_prefetch(self, n=None):
        """Trigger prefetching of the next N tensors with proper synchronization"""
        if n is None:
            n = self.max_prefetch

        prefetch_paths = []
        with self.manager_lock:
            # Find files that are ready to be prefetched (not already in cache or being prefetched)
            for path in reversed(self.tensor_paths):
                if (
                    path not in self.prefetch_cache
                    and self.file_status.get(path) == "ready"
                ):
                    prefetch_paths.append(path)
                    if len(prefetch_paths) >= n:
                        break

        # Queue files for prefetching
        for path in prefetch_paths:
            self.prefetch_queue.put(path)

    def cleanup_tensor(self, file_path: str):
        """Clean up a specific tensor file after it's been used"""
        with self.manager_lock:
            if file_path in self.tensor_paths:
                self.tensor_paths.remove(file_path)

            # Remove from prefetch cache if present
            if file_path in self.prefetch_cache:
                del self.prefetch_cache[file_path]

            # Remove from save futures if present
            if file_path in self.save_futures:
                future = self.save_futures[file_path]
                if not future.done():
                    future.cancel()
                del self.save_futures[file_path]

        # Try to delete the file
        self._safe_delete_file(file_path)

    def cleanup(self):
        """Clean up all temp files and stop prefetch thread with proper synchronization"""
        self.stop_event.set()

        # Cancel all pending save operations
        with self.manager_lock:
            for _, future in self.save_futures.items():
                if not future.done():
                    future.cancel()
            self.save_futures.clear()

        # Drain the save queue
        while not self.save_queue.empty():
            try:
                self.save_queue.get_nowait()
                self.save_queue.task_done()
            except queue.Empty:
                break

        # Shutdown the save pool
        self.save_pool.shutdown(wait=False)

        # Join the save worker thread
        if self.save_worker.is_alive():
            self.save_worker.join(timeout=2.0)

        # Join the prefetch worker threads
        for thread in self.prefetch_workers:
            if thread.is_alive():
                thread.join(timeout=2.0)

        # Clear cache and remove all temporary files
        with self.manager_lock:
            self.prefetch_cache.clear()
            paths_to_delete = list(self.tensor_paths)
            self.tensor_paths.clear()

        # Delete all temporary files
        for path in paths_to_delete:
            self._safe_delete_file(path)

        # Remove temp directory
        try:
            if os.path.exists(self.temp_dir):
                shutil.rmtree(self.temp_dir, ignore_errors=True)
        except FileNotFoundError as e:
            logger.warning(f"Error removing temporary directory {self.temp_dir}: {e}")


class Disco(torch.autograd.Function):
    """
    Disco: DIsk-based Storage and Checkpointing with Optimized prefetching
    Advanced disk-based gradient checkpointer with prefetching.
    """

    # Shared manager instance across all checkpointing operations
    _manager = None

    @staticmethod
    def get_instance(prefetch_size=1, prefetch_to_gpu=True, save_workers=4):
        """Get or create the offload manager"""
        if Disco._manager is None:
            Disco._manager = DiskOffloadManager(
                prefetch_size=prefetch_size,
                prefetch_to_gpu=prefetch_to_gpu,
                save_workers=save_workers,
            )
        return Disco._manager

    @staticmethod
    @torch_cuda_amp_custom_fwd
    def forward(
        ctx,
        forward_function,
        hidden_states,
        *args,
        prefetch_size=1,
        prefetch_to_gpu=True,
        save_workers=4,
    ):
        """Forward pass that offloads activations to disk asynchronously"""
        # Get or create the manager
        manager = Disco.get_instance(
            prefetch_size=prefetch_size,
            prefetch_to_gpu=prefetch_to_gpu,
            save_workers=save_workers,
        )

        # Save tensor to disk asynchronously
        file_path = manager.save_tensor(hidden_states)

        # Run forward pass immediately without waiting for save to complete
        with torch.no_grad():
            output = forward_function(hidden_states, *args)

        # Store what we need for backward
        ctx.save_for_backward(torch.tensor([0]))  # Dummy tensor
        ctx.file_path = file_path
        ctx.forward_function = forward_function
        ctx.args = args

        return output

    @staticmethod
    @torch_cuda_amp_custom_bwd
    def backward(ctx, *grad_outputs):
        """Backward pass that loads activations from disk with prefetching"""
        # Get the manager
        manager = Disco._manager

        # Trigger prefetching for future tensors
        # This happens at the start of backward, so should have time to complete
        manager.trigger_prefetch()

        # Load hidden states from disk or prefetch cache
        file_path = ctx.file_path
        try:
            # Ensure the file is saved before we try to load it
            manager.wait_for_save(file_path)

            hidden_states = manager.load_tensor(file_path)
            hidden_states.requires_grad = True

            # Compute gradients
            with torch.enable_grad():
                output = ctx.forward_function(hidden_states, *ctx.args)

            # Handle tuple outputs properly
            if isinstance(output, tuple):
                if len(grad_outputs) == len(output):
                    torch.autograd.backward(output, grad_outputs)
                else:
                    torch.autograd.backward(output, grad_outputs[0])
            else:
                torch.autograd.backward(output, grad_outputs[0])

            # Clean up the file after we're done with it
            manager.cleanup_tensor(file_path)

            return (
                (
                    None,  # forward_function
                    hidden_states.grad,  # hidden_states grad
                )
                + (None,) * len(ctx.args)  # for each arg
                + (
                    None,  # prefetch_size
                    None,  # prefetch_to_gpu
                    None,  # save_workers
                )
            )

        except Exception as e:
            logger.error(f"Error in backward pass: {e}")
            # Clean up the file even on error
            manager.cleanup_tensor(file_path)
            raise
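A hedged round-trip sketch of the DiskOffloadManager API defined above, showing the save / wait / load / cleanup cycle that the Disco checkpointer relies on; the tensor shape and devices are illustrative only.

manager = Disco.get_instance(prefetch_size=2, prefetch_to_gpu=False)
path = manager.save_tensor(torch.randn(4, 1024))   # asynchronous save, returns temp file path
manager.wait_for_save(path)                         # block until the background save finishes
restored = manager.load_tensor(path, target_device="cpu")
manager.cleanup_tensor(path)                        # remove the temp file once consumed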
@@ -70,10 +70,12 @@ from axolotl.utils.distributed import (
    is_local_main_process,
    is_main_process,
)
-from axolotl.utils.gradient_checkpointing import hf_grad_checkpoint_offload_wrapper
+from axolotl.utils.gradient_checkpointing import (
+    hf_grad_checkpoint_disk_offload_wrapper,
+    hf_grad_checkpoint_offload_wrapper,
+)
from axolotl.utils.lora_embeddings import get_linear_embedding_layers
from axolotl.utils.model_shard_quant import load_sharded_model, load_sharded_model_quant
-from axolotl.utils.schemas.enums import RLType

LOG = logging.getLogger(__name__)
PLUGIN_MANAGER = PluginManager.get_instance()
@@ -142,22 +144,6 @@ def check_model_config(cfg: DictDefault, model_config: PretrainedConfig):
        hasattr(model_config, "quantization_config")
        and model_config.quantization_config
    )

-    # Detect compressed-tensors config
-    is_compressed_tensors_config = (
-        quant_config_exists
-        and model_config.quantization_config.get("quant_method") == "compressed-tensors"
-    )
-
-    if is_compressed_tensors_config:
-        if model_config.quantization_config.get("config_groups"):
-            LOG.warning(
-                "Found `config_groups` in a compressed-tensors config. "
-                "QAT integration with llmcompressor is not tested."
-            )
-        # Skip further quant checks for compressed-tensors
-        return
-
    quant_config_method_is_gptq = (
        quant_config_exists
        and "quant_method" in model_config.quantization_config
@@ -620,6 +606,10 @@ class ModelLoader:

        if self.cfg.gradient_checkpointing in ["unsloth", "offload"]:
            transformers.modeling_utils.checkpoint = hf_grad_checkpoint_offload_wrapper
+        if self.cfg.gradient_checkpointing == "offload_disk":
+            transformers.modeling_utils.checkpoint = (
+                hf_grad_checkpoint_disk_offload_wrapper
+            )

        if self.cfg.flash_attention:
            self.patch_attention()
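A hedged standalone summary of the dispatch above (not axolotl code): which checkpoint wrapper each gradient_checkpointing setting selects when the transformers checkpoint hook is patched.

def select_checkpoint_wrapper(gradient_checkpointing):
    if gradient_checkpointing in ("unsloth", "offload"):
        return hf_grad_checkpoint_offload_wrapper       # CPU-offloaded checkpointing
    if gradient_checkpointing == "offload_disk":
        return hf_grad_checkpoint_disk_offload_wrapper  # DISCO disk-offloaded checkpointing
    return None  # fall back to the default torch.utils.checkpoint behavior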
@@ -1373,7 +1363,7 @@ class ModelLoader:
        # then the dpo trainer doesn't want the peft model loaded over it, it just wants the lora/peft config
        if (
            self.cfg.adapter
-            and self.cfg.rl in [RLType.DPO, RLType.IPO, RLType.KTO]
+            and self.cfg.rl in ["dpo", "ipo", "kto"]
            and not self.cfg.merge_lora
        ):
            _, lora_config = load_lora(
@@ -27,7 +27,7 @@ from axolotl.utils.schemas.datasets import (
    StepwiseSupervisedDataset,
)
from axolotl.utils.schemas.deprecated import DeprecatedParameters, RemappedParameters
-from axolotl.utils.schemas.enums import ChatTemplate, RingAttnFunc, RLType
+from axolotl.utils.schemas.enums import ChatTemplate, RLType
from axolotl.utils.schemas.integrations import (
    CometConfig,
    GradioConfig,
@@ -83,7 +83,6 @@ class AxolotlInputConfig(
|
|||||||
# optionally shrink the embeddings when the tokenizer vocab size is smaller
|
# optionally shrink the embeddings when the tokenizer vocab size is smaller
|
||||||
shrink_embeddings: bool | None = None
|
shrink_embeddings: bool | None = None
|
||||||
embeddings_skip_upcast: bool | None = None
|
embeddings_skip_upcast: bool | None = None
|
||||||
random_init_weights: bool | None = None
|
|
||||||
|
|
||||||
rl: RLType | None = None
|
rl: RLType | None = None
|
||||||
trl: TRLConfig | None = Field(
|
trl: TRLConfig | None = Field(
|
||||||
@@ -179,7 +178,7 @@ class AxolotlInputConfig(
|
|||||||
|
|
||||||
# torch_dtype: torch.dtype | None
|
# torch_dtype: torch.dtype | None
|
||||||
|
|
||||||
gradient_checkpointing: Literal["unsloth", "offload"] | bool | None = Field(
|
gradient_checkpointing: Literal["offload", "offload_disk"] | bool | None = Field(
|
||||||
default=False
|
default=False
|
||||||
)
|
)
|
||||||
gradient_checkpointing_kwargs: dict[str, Any] | None = None
|
gradient_checkpointing_kwargs: dict[str, Any] | None = None
|
||||||
@@ -261,7 +260,7 @@ class AxolotlInputConfig(
|
|||||||
|
|
||||||
sequence_parallel_degree: int | None = None
|
sequence_parallel_degree: int | None = None
|
||||||
heads_k_stride: int | None = None
|
heads_k_stride: int | None = None
|
||||||
ring_attn_func: RingAttnFunc | None = None
|
ring_attn_func: str | None = None
|
||||||
|
|
||||||
special_tokens: SpecialTokensConfig | None = None
|
special_tokens: SpecialTokensConfig | None = None
|
||||||
tokens: list[str] | None = None
|
tokens: list[str] | None = None
|
||||||
@@ -783,7 +782,7 @@ class AxolotlInputConfig(
|
|||||||
|
|
||||||
@model_validator(mode="after")
|
@model_validator(mode="after")
|
||||||
def check_simpo_warmup(self):
|
def check_simpo_warmup(self):
|
||||||
if self.rl is RLType.SIMPO and self.warmup_ratio:
|
if self.rl == "simpo" and self.warmup_ratio:
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
"warmup_ratio is not supported with the simpo trainer. Please use `warmup_steps` instead"
|
"warmup_ratio is not supported with the simpo trainer. Please use `warmup_steps` instead"
|
||||||
)
|
)
|
||||||
@@ -1186,7 +1185,7 @@ class AxolotlInputConfig(
|
|||||||
|
|
||||||
if self.sample_packing and self.micro_batch_size > 1:
|
if self.sample_packing and self.micro_batch_size > 1:
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
"micro_batch_size must be set to 1 when sample_packing is enabled "
|
"micro_batch_size must be set to 1 when sample_packing is enabled"
|
||||||
"due to a `ring-flash-attn` requirement"
|
"due to a `ring-flash-attn` requirement"
|
||||||
)
|
)
|
||||||
|
|
||||||
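A self-contained sketch of the pydantic `@model_validator(mode="after")` pattern that check_simpo_warmup above relies on, using a stand-in model rather than the real AxolotlInputConfig; only the field names and the error message are taken from the diff:

    from pydantic import BaseModel, model_validator


    class _RLSettings(BaseModel):
        rl: str | None = None
        warmup_ratio: float | None = None
        warmup_steps: int | None = None

        @model_validator(mode="after")
        def check_simpo_warmup(self):
            # Runs after all fields are parsed, so cross-field checks are safe here.
            if self.rl == "simpo" and self.warmup_ratio:
                raise ValueError(
                    "warmup_ratio is not supported with the simpo trainer. "
                    "Please use `warmup_steps` instead"
                )
            return self


    _RLSettings(rl="simpo", warmup_steps=10)  # passes validation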
@@ -1218,8 +1217,16 @@ class AxolotlInputConfig(
         if getattr(self, "sequence_parallel_degree", 1) == 1:
             return self

+        from axolotl.monkeypatch.attention.ring_attn.patch import RingAttnFunc
+
         if self.ring_attn_func is not None:
-            self.ring_attn_func = RingAttnFunc(self.ring_attn_func)
+            valid_funcs = list(RingAttnFunc)
+            if self.ring_attn_func in valid_funcs:
+                self.ring_attn_func = RingAttnFunc(self.ring_attn_func)
+            else:
+                raise ValueError(
+                    f"ring_attn_func: {self.ring_attn_func} must be in {valid_funcs}"
+                )
         else:
             # Default ring attention function selection
             sample_packing = getattr(self, "sample_packing", False)
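The validation added above coerces a raw config string into the RingAttnFunc enum and raises on anything outside it. A self-contained sketch of the same pattern, using a stand-in str Enum so the example does not depend on the axolotl import path this diff is moving:

    from enum import Enum


    class _RingAttnFunc(str, Enum):
        VARLEN_LLAMA3 = "varlen_llama3"
        BATCH_RING = "batch_ring"


    def coerce_ring_attn_func(value: str | None) -> _RingAttnFunc | None:
        if value is None:
            return None
        valid_funcs = list(_RingAttnFunc)
        # Because the enum mixes in str, a raw config string compares equal to a member.
        if value in valid_funcs:
            return _RingAttnFunc(value)
        raise ValueError(f"ring_attn_func: {value} must be in {valid_funcs}")


    assert coerce_ring_attn_func("batch_ring") is _RingAttnFunc.BATCH_RING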
@@ -6,12 +6,12 @@ from enum import Enum
 class RLType(str, Enum):
     """RL trainer type configuration subset"""

-    DPO = "dpo"  # pylint: disable=invalid-name
-    GRPO = "grpo"  # pylint: disable=invalid-name
-    IPO = "ipo"  # pylint: disable=invalid-name
-    ORPO = "orpo"  # pylint: disable=invalid-name
-    KTO = "kto"  # pylint: disable=invalid-name
-    SIMPO = "simpo"  # pylint: disable=invalid-name
+    dpo = "dpo"  # pylint: disable=invalid-name
+    grpo = "grpo"  # pylint: disable=invalid-name
+    ipo = "ipo"  # pylint: disable=invalid-name
+    orpo = "orpo"  # pylint: disable=invalid-name
+    kto = "kto"  # pylint: disable=invalid-name
+    simpo = "simpo"  # pylint: disable=invalid-name


 class ChatTemplate(str, Enum):
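Because RLType mixes in str, its members compare equal to their plain-string values, which is why the string comparisons elsewhere in this diff (e.g. `self.cfg.rl in ["dpo", "ipo", "kto"]` and `self.rl == "simpo"`) keep working regardless of the member-name casing change above. A minimal stand-in with the same shape:

    from enum import Enum


    class _RLType(str, Enum):
        dpo = "dpo"
        ipo = "ipo"
        kto = "kto"
        simpo = "simpo"


    rl = _RLType.dpo
    assert rl == "dpo"
    assert rl in ["dpo", "ipo", "kto"]
    assert _RLType("simpo") == "simpo"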
@@ -55,14 +55,3 @@ class CustomSupportedOptimizers(str, Enum):
     adopt_adamw = "adopt_adamw"  # pylint: disable=invalid-name
     came_pytorch = "came_pytorch"  # pylint: disable=invalid-name
     muon = "muon"  # pylint: disable=invalid-name
-
-
-class RingAttnFunc(str, Enum):
-    """Enum class for supported `ring-flash-attn` implementations"""
-
-    # VARLEN_RING = "varlen_ring"
-    # VARLEN_ZIGZAG = "varlen_zigzag"
-    VARLEN_LLAMA3 = "varlen_llama3"
-    BATCH_RING = "batch_ring"
-    # BATCH_ZIGZAG = "batch_zigzag"
-    # BATCH_STRIPE = "batch_stripe"
@@ -1,111 +0,0 @@
-"""
-E2E smoke tests for LLMCompressorPlugin integration
-"""
-
-from pathlib import Path
-
-import pytest
-
-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
-from axolotl.train import train
-from axolotl.utils.config import normalize_config, prepare_plugins, validate_config
-from axolotl.utils.dict import DictDefault
-
-from tests.e2e.utils import (
-    check_model_output_exists,
-    require_llmcompressor,
-    require_torch_2_4_1,
-)
-
-MODELS = [
-    "nm-testing/llama2.c-stories42M-pruned2.4-compressed",
-    "nm-testing/llama2.c-stories42M-gsm8k-sparse-only-compressed",
-]
-
-
-@pytest.mark.parametrize(
-    "base_model", MODELS, ids=["no-checkpoint-recipe", "with-checkpoint-recipe"]
-)
-@pytest.mark.parametrize(
-    "save_compressed", [True, False], ids=["save_compressed", "save_uncompressed"]
-)
-class TestLLMCompressorIntegration:
-    """
-    e2e tests for axolotl.integrations.llm_compressor.LLMCompressorPlugin
-    """
-
-    @require_llmcompressor
-    @require_torch_2_4_1
-    def test_llmcompressor_plugin(
-        self, temp_dir, base_model: str, save_compressed: bool
-    ):
-        from llmcompressor import active_session
-
-        # core cfg
-        cfg = DictDefault(
-            {
-                "base_model": base_model,
-                "plugins": ["axolotl.integrations.llm_compressor.LLMCompressorPlugin"],
-                "sequence_len": 1024,
-                "val_set_size": 0.05,
-                "special_tokens": {"pad_token": "<|endoftext|>"},
-                "datasets": [{"path": "mhenrichsen/alpaca_2k_test", "type": "alpaca"}],
-                "num_epochs": 1,
-                "micro_batch_size": 2,
-                "gradient_accumulation_steps": 2,
-                "output_dir": temp_dir,
-                "learning_rate": 1e-5,
-                "optimizer": "adamw_torch_fused",
-                "lr_scheduler": "cosine",
-                "save_safetensors": True,
-                "bf16": "auto",
-                "max_steps": 5,
-                "llmcompressor": {
-                    "recipe": {
-                        "finetuning_stage": {
-                            "finetuning_modifiers": {
-                                "ConstantPruningModifier": {
-                                    "targets": [
-                                        "re:.*q_proj.weight",
-                                        "re:.*k_proj.weight",
-                                        "re:.*v_proj.weight",
-                                        "re:.*o_proj.weight",
-                                        "re:.*gate_proj.weight",
-                                        "re:.*up_proj.weight",
-                                        "re:.*down_proj.weight",
-                                    ],
-                                    "start": 0,
-                                },
-                            },
-                        },
-                    },
-                    "save_compressed": save_compressed,
-                },
-            }
-        )
-
-        prepare_plugins(cfg)
-        cfg = validate_config(cfg)
-        normalize_config(cfg)
-        cli_args = TrainerCliArgs()
-        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
-
-        try:
-            train(cfg=cfg, dataset_meta=dataset_meta)
-            check_model_output_exists(temp_dir, cfg)
-            _check_llmcompressor_model_outputs(temp_dir, save_compressed)
-        finally:
-            active_session().reset()
-
-
-def _check_llmcompressor_model_outputs(temp_dir, save_compressed):
-    if save_compressed:
-        assert (Path(temp_dir) / "recipe.yaml").exists()
-
-        from compressed_tensors import ModelCompressor
-        from compressed_tensors.config import Sparse24BitMaskConfig
-
-        compressor = ModelCompressor.from_pretrained(temp_dir)
-        assert compressor is not None
-        assert isinstance(compressor.sparsity_config, Sparse24BitMaskConfig)
@@ -25,7 +25,6 @@ class TestSequenceParallelism:
         micro_batch_size=1,
         pad_to_sequence_len=True,
         ring_attn_func=None,
-        threshold=2.0,
     ):
         """Helper method to run sequence parallel tests with different configurations"""
         cfg = DictDefault(
@@ -94,22 +93,22 @@ class TestSequenceParallelism:
         )

         check_tensorboard(
-            temp_dir + "/runs", "train/train_loss", threshold, "Train Loss is too high"
+            temp_dir + "/runs", "train/train_loss", 2.6, "Train Loss is too high"
         )

     @pytest.mark.parametrize(
-        "sample_packing, micro_batch_size, pad_to_sequence_len, ring_attn_func, threshold",
+        "sample_packing, micro_batch_size, pad_to_sequence_len, ring_attn_func",
         [
-            (True, 1, True, None, 2.5),  # defaults to varlen_llama3 ring_attn_func
-            (False, 2, True, None, 2.5),  # defaults to batch_ring ring_attn_func
-            # (False, 2, True, "batch_zigzag", 2.5),
-            (False, 2, False, None, 2.5),  # defaults to batch_ring ring_attn_func
+            (True, 1, True, None),  # defaults to varlen_llama3 ring_attn_func
+            (False, 2, True, None),  # defaults to batch_ring ring_attn_func
+            (False, 2, True, "batch_zigzag"),
+            # (False, 2, False),  # not yet working
         ],
         ids=[
             "sample_packing, varlen_llama3 ring_attn_func",
-            "no sample_packing, pad_to_sequence_len, batch_ring ring_attn_func",
-            # "no sample_packing, no pad_to_sequence_len, batch_zigzag ring_attn_func",
             "no sample_packing, no pad_to_sequence_len, batch_ring ring_attn_func",
+            "no sample_packing, no pad_to_sequence_len, batch_zigzag ring_attn_func",
+            # "no sample_packing, pad_to_sequence_len",  # not yet working
         ],
     )
     def test_sequence_parallel_training(
@@ -119,7 +118,6 @@ class TestSequenceParallelism:
         micro_batch_size,
         pad_to_sequence_len,
         ring_attn_func,
-        threshold,
     ):
         """Test sequence parallel training with different configurations"""
         self._run_sequence_parallel_test(
@@ -128,5 +126,4 @@ class TestSequenceParallelism:
             micro_batch_size=micro_batch_size,
             pad_to_sequence_len=pad_to_sequence_len,
             ring_attn_func=ring_attn_func,
-            threshold=threshold,
         )
@@ -26,10 +26,15 @@ class TestActivationCheckpointing:
     E2E tests for activation checkpointing
     """

+    @pytest.mark.parametrize(
+        "gradient_checkpointing",
+        ["offload", "offload_disk"],
+    )
     def test_activation_checkpointing_offload(
         self,
         temp_dir,
         fix_checkpoint_after_test,  # pylint: disable=unused-argument,redefined-outer-name
+        gradient_checkpointing,
     ):
         # pylint: disable=duplicate-code
         cfg = DictDefault(
@@ -64,7 +69,7 @@ class TestActivationCheckpointing:
                 "sample_packing": True,
                 "bf16": True,
                 "save_safetensors": True,
-                "gradient_checkpointing": "offload",
+                "gradient_checkpointing": gradient_checkpointing,
             }
         )

@@ -10,15 +10,14 @@ import pytest
 import torch
 from accelerate.state import PartialState

+from axolotl.core.trainers.mixins.sequence_parallel import apply_sequence_parallelism
 from axolotl.monkeypatch.attention.ring_attn import (
+    RingAttnFunc,
     get_ring_attn_group,
     register_ring_attn,
     set_ring_attn_group,
 )
-from axolotl.utils.ctx_managers.sequence_parallel import apply_sequence_parallelism
 from axolotl.utils.dict import DictDefault
-from axolotl.utils.schemas.enums import RingAttnFunc
-from axolotl.utils.schemas.trl import TRLConfig


 @pytest.fixture
@@ -63,14 +62,12 @@ def sequence_parallel_batch():
     input_ids = torch.arange(batch_size * seq_len).reshape(batch_size, seq_len)
     attention_mask = torch.ones(batch_size, seq_len)
     position_ids = torch.arange(seq_len).expand(batch_size, seq_len)
-    labels = input_ids.clone()

     # Create test batch
     batch = {
         "input_ids": input_ids,
         "attention_mask": attention_mask,
         "position_ids": position_ids,
-        "labels": labels,
     }

     return batch
@@ -182,44 +179,12 @@ class TestConfigValidation:
                 False,
                 "micro_batch_size must be set to 1",
             ),
-            # Valid: Basic GRPO config
-            (
-                {
-                    "sequence_parallel_degree": 2,
-                    "flash_attention": True,
-                    "micro_batch_size": 2,
-                    "trl": {"use_liger_loss": True},
-                },
-                {
-                    "sequence_parallel_degree": 2,
-                    "flash_attention": True,
-                    "micro_batch_size": 2,
-                    "trl": TRLConfig(use_liger_loss=True),
-                },
-                True,
-                "GRPO + SP + Liger not currently supported",
-            ),
-            # Invalid: GRPO config with Liger loss
-            (
-                {
-                    "rl": "grpo",
-                    "sequence_parallel_degree": 2,
-                    "flash_attention": True,
-                    "micro_batch_size": 2,
-                    "trl": {"use_liger_loss": True},
-                },
-                None,
-                False,
-                "GRPO + SP + Liger not currently supported",
-            ),
         ],
         ids=[
             "valid_config",
            "default_sp_degree",
             "without_flash_attention",
             "sample_packing_with_large_batch",
-            "valid_grpo",
-            "grpo_with_liger_loss",
         ],
     )
     def test_sequence_parallel_config_validation(
@@ -291,7 +256,7 @@ class TestConfigValidation:
             AxolotlInputConfig(**cfg)

         # Verify error message
-        assert "Input should be 'varlen_llama3' or 'batch_ring'" in str(excinfo.value)
+        assert "ring_attn_func: INVALID_FUNC must be in" in str(excinfo.value)


 class TestApplySequenceParallelism:
@@ -325,11 +290,10 @@ class TestApplySequenceParallelism:

     def test_world_size_one(self, sequence_parallel_batch):
         """Test that function returns original batch when world size is 1."""
-        result, _, _ = apply_sequence_parallelism(
+        result = apply_sequence_parallelism(
             batch=sequence_parallel_batch,
             local_rank=0,
             local_world_size=1,
-            gradient_accumulation_steps=1,
             ring_attn_func=RingAttnFunc.BATCH_RING,
         )

@@ -341,11 +305,10 @@ class TestApplySequenceParallelism:
         batch = sequence_parallel_batch
         seq_len = batch["input_ids"].size(1)

-        result, _, _ = apply_sequence_parallelism(
+        result = apply_sequence_parallelism(
             batch=batch,
             local_rank=0,
             local_world_size=2,
-            gradient_accumulation_steps=1,
             ring_attn_func=RingAttnFunc.BATCH_RING,
         )

@@ -365,59 +328,57 @@ class TestApplySequenceParallelism:
         seq_len = batch["input_ids"].size(1)
         original_input_ids = batch["input_ids"].clone()

-        result, _, _ = apply_sequence_parallelism(
+        result = apply_sequence_parallelism(
             batch=batch,
             local_rank=1,
             local_world_size=2,
-            gradient_accumulation_steps=1,
             ring_attn_func=RingAttnFunc.BATCH_RING,
         )

         # Verify content: rank 1 should get the second half of the sequence
         assert torch.equal(result["input_ids"], original_input_ids[:, seq_len // 2 :])

-    # TODO(djsaunde): add back once implemented.
-    # def test_batch_zigzag(self, sequence_parallel_batch):
-    #     """Test BATCH_ZIGZAG sharding pattern."""
-    #     batch = sequence_parallel_batch
-    #     original_input_ids = batch["input_ids"].clone()
-    #     seq_len = batch["input_ids"].size(1)
+    def test_batch_zigzag(self, sequence_parallel_batch):
+        """Test BATCH_ZIGZAG sharding pattern."""
+        batch = sequence_parallel_batch
+        original_input_ids = batch["input_ids"].clone()
+        seq_len = batch["input_ids"].size(1)

-    #     # Test rank 0
-    #     result_rank0 = apply_sequence_parallelism(
-    #         batch={k: v.clone() for k, v in batch.items()},
-    #         local_rank=0,
-    #         local_world_size=2,
-    #         ring_attn_func=RingAttnFunc.BATCH_ZIGZAG,
-    #     )
+        # Test rank 0
+        result_rank0 = apply_sequence_parallelism(
+            batch={k: v.clone() for k, v in batch.items()},
+            local_rank=0,
+            local_world_size=2,
+            ring_attn_func=RingAttnFunc.BATCH_ZIGZAG,
+        )

-    #     # Test rank 1
-    #     result_rank1 = apply_sequence_parallelism(
-    #         batch={k: v.clone() for k, v in batch.items()},
-    #         local_rank=1,
-    #         local_world_size=2,
-    #         ring_attn_func=RingAttnFunc.BATCH_ZIGZAG,
-    #     )
+        # Test rank 1
+        result_rank1 = apply_sequence_parallelism(
+            batch={k: v.clone() for k, v in batch.items()},
+            local_rank=1,
+            local_world_size=2,
+            ring_attn_func=RingAttnFunc.BATCH_ZIGZAG,
+        )

-    #     # Checks for both ranks
-    #     assert result_rank0["input_ids"].shape[1] == seq_len // 2
-    #     assert result_rank1["input_ids"].shape[1] == seq_len // 2
+        # Checks for both ranks
+        assert result_rank0["input_ids"].shape[1] == seq_len // 2
+        assert result_rank1["input_ids"].shape[1] == seq_len // 2

-    #     # For a 2-rank system with 8 tokens, check specific zigzag pattern
-    #     # Rank 0 should get chunks [0, 1] and [6, 7]
-    #     # Rank 1 should get chunks [2, 3] and [4, 5]
-    #     if seq_len == 8:
-    #         # Create expected tensors for comparison
-    #         rank0_expected = torch.cat(
-    #             [original_input_ids[:, :2], original_input_ids[:, 6:8]], dim=1
-    #         )
+        # For a 2-rank system with 8 tokens, check specific zigzag pattern
+        # Rank 0 should get chunks [0, 1] and [6, 7]
+        # Rank 1 should get chunks [2, 3] and [4, 5]
+        if seq_len == 8:
+            # Create expected tensors for comparison
+            rank0_expected = torch.cat(
+                [original_input_ids[:, :2], original_input_ids[:, 6:8]], dim=1
+            )

-    #         rank1_expected = torch.cat(
-    #             [original_input_ids[:, 2:4], original_input_ids[:, 4:6]], dim=1
-    #         )
+            rank1_expected = torch.cat(
+                [original_input_ids[:, 2:4], original_input_ids[:, 4:6]], dim=1
+            )

-    #         assert torch.equal(result_rank0["input_ids"], rank0_expected)
-    #         assert torch.equal(result_rank1["input_ids"], rank1_expected)
+            assert torch.equal(result_rank0["input_ids"], rank0_expected)
+            assert torch.equal(result_rank1["input_ids"], rank1_expected)

     def test_partial_application(self, sequence_parallel_batch):
         """Test that we can create a partially applied version of the function."""
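The re-enabled test above encodes the expected zigzag layout for two ranks and eight tokens: rank 0 takes chunks [0, 1] and [6, 7], rank 1 takes [2, 3] and [4, 5]. A self-contained sketch of that layout, assuming the usual ring-attention convention of splitting the sequence into 2 * world_size chunks and giving rank i chunk i plus its mirror chunk; this illustrates the expected pattern only, not the library's implementation:

    import torch


    def zigzag_shard(input_ids: torch.Tensor, rank: int, world_size: int) -> torch.Tensor:
        # Split the sequence dimension into 2 * world_size equal chunks.
        chunks = input_ids.chunk(2 * world_size, dim=1)
        # Rank i gets chunk i and the mirror chunk from the other end of the sequence.
        return torch.cat([chunks[rank], chunks[2 * world_size - 1 - rank]], dim=1)


    input_ids = torch.arange(8).unsqueeze(0)  # shape (1, 8): tokens 0..7
    assert zigzag_shard(input_ids, rank=0, world_size=2).tolist() == [[0, 1, 6, 7]]
    assert zigzag_shard(input_ids, rank=1, world_size=2).tolist() == [[2, 3, 4, 5]]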
@@ -429,12 +390,11 @@ class TestApplySequenceParallelism:
             apply_sequence_parallelism,
             local_rank=0,
             local_world_size=2,
-            gradient_accumulation_steps=1,
             ring_attn_func=RingAttnFunc.BATCH_RING,
         )

         # Use the partially applied function
-        result, _, _ = rank0_ring_parallel(batch=batch)
+        result = rank0_ring_parallel(batch=batch)

         # Verify it works as expected
         assert result["input_ids"].shape[1] == original_input_ids.shape[1] // 2
@@ -452,15 +412,13 @@ class TestApplySequenceParallelism:
         original_input_ids = batch["input_ids"].clone()

         # This should run without error even though position_ids is missing
-        result, _, _ = apply_sequence_parallelism(
+        result = apply_sequence_parallelism(
             batch=batch,
             local_rank=0,
             local_world_size=2,
-            gradient_accumulation_steps=1,
             ring_attn_func=RingAttnFunc.BATCH_RING,
         )

         # Verification should pass
-        assert "position_ids" in result
-        assert result["input_ids"].shape[1] == result["position_ids"].shape[1]
+        assert "position_ids" not in result
         assert result["input_ids"].shape[1] == original_input_ids.shape[1] // 2
@@ -105,25 +105,7 @@ def require_vllm(test_case):
             return False

     return unittest.skipUnless(
-        is_vllm_installed(), "test requires vllm to be installed"
-    )(test_case)
-
-
-def require_llmcompressor(test_case):
-    """
-    Decorator marking a test that requires a llmcompressor to be installed
-    """
-
-    def is_llmcompressor_installed():
-        try:
-            import llmcompressor  # pylint: disable=unused-import  # noqa: F401
-
-            return True
-        except ImportError:
-            return False
-
-    return unittest.skipUnless(
-        is_llmcompressor_installed(), "test requires llmcompressor to be installed"
+        is_vllm_installed(), "test requires a vllm to be installed"
     )(test_case)
