Compare commits

llmcompres… ... llmcompres… (35 commits)

c9880977be, f196941315, 5be047ac46, 758115b8c6, 0dc1da5876, f3e876dbfc, 99c13ef60c,
2c24434ee0, 586268a0d7, b600e119b6, a8e5ba000e, bc3dfa666d, 4371f3459e, cc58d5e072,
d197b054e3, 7e1e153831, 42de3096cf, 27758840a1, 8dbf5c215a, 6411ca3fe1, 813809c54d,
af7cfdc30b, b76d2d1130, 7946f89df4, 8b33ae1c4f, dc4da4a7e2, f9c7c3bb72, caf5cb63ea,
5dba5c82a8, e3c9d541a7, 9eba0ad118, 53dbf97d85, 2c2563bc34, 5cb3398460, ae1c7ace63
.github/workflows/main.yml (vendored, 2 changed lines)

@@ -24,7 +24,7 @@ jobs:
   cuda_version: 12.4.1
   python_version: "3.11"
   pytorch: 2.5.1
-  axolotl_extras: vllm
+  axolotl_extras:
 - cuda: 124
   cuda_version: 12.4.1
   python_version: "3.11"
.github/workflows/multi-gpu-e2e.yml (vendored, 3 changed lines)

@@ -8,6 +8,7 @@ on:
 - 'setup.py'
 - 'pyproject.toml'
 - '.github/workflows/multi-gpu-e2e.yml'
+- 'src/axolotl/core/trainers/mixins/sequence_parallel.py'
 workflow_dispatch:
 schedule:
 - cron: '0 0 * * 1,4'  # Runs at 00:00 UTC every monday & thursday
@@ -42,7 +43,7 @@ jobs:
   cuda_version: 12.4.1
   python_version: "3.11"
   pytorch: 2.5.1
-  axolotl_extras: vllm
+  axolotl_extras:
   num_gpus: 2
   nightly_build: "true"
 - cuda: 126
.github/workflows/tests.yml (vendored, 8 changed lines)

@@ -258,6 +258,12 @@ jobs:
   fail-fast: false
   matrix:
     include:
+    - cuda: 124
+      cuda_version: 12.4.1
+      python_version: "3.11"
+      pytorch: 2.6.0
+      num_gpus: 1
+      axolotl_extras: llmcompressor
     - cuda: 124
       cuda_version: 12.4.1
       python_version: "3.11"
@@ -269,7 +275,7 @@ jobs:
       python_version: "3.11"
      pytorch: 2.5.1
      num_gpus: 1
-     axolotl_extras: vllm
+     axolotl_extras:
    - cuda: 126
      cuda_version: 12.6.3
      python_version: "3.11"
@@ -52,4 +52,4 @@ pytest -v --durations=10 \
   --cov-append \
   --cov-report=xml:e2e-coverage.xml

-codecov upload-process -t $CODECOV_TOKEN -f e2e-coverage.xml -F e2e,pytorch-${PYTORCH_VERSION}
+codecov upload-process -t $CODECOV_TOKEN -f e2e-coverage.xml -F e2e,pytorch-${PYTORCH_VERSION} || true

@@ -20,4 +20,4 @@ pytest -v --durations=10 -n1 /workspace/axolotl/tests/e2e/multigpu/patched/ \
   --cov-report=xml:multigpu-coverage.xml

 # Upload coverage to Codecov
-codecov upload-process -t $CODECOV_TOKEN -f multigpu-coverage.xml -F multigpu,docker-tests,pytorch-${PYTORCH_VERSION}
+codecov upload-process -t "${CODECOV_TOKEN}" -f multigpu-coverage.xml -F multigpu,docker-tests,pytorch-${PYTORCH_VERSION} || true
@@ -1,5 +1,7 @@
 codecov:
   require_ci_to_pass: yes
+  notify:
+    wait_for_ci: true

 coverage:
   precision: 2
@@ -49,7 +49,8 @@ sections = [
     ("Knowledge Distillation (KD)", "kd"),
     ("Liger Kernels", "liger"),
     ("Language Model Evaluation Harness (LM Eval)", "lm_eval"),
-    ("Spectrum", "spectrum")
+    ("Spectrum", "spectrum"),
+    ("LLMCompressor", "llm_compressor")
 ]

 for section_name, folder_name in sections:
@@ -28,6 +28,8 @@ main-base-py{python_version}-cu{cuda_version}-{pytorch_version}

 Tags examples:

+- `main-base-py3.11-cu128-2.7.0`
+- `main-base-py3.11-cu126-2.7.0`
 - `main-base-py3.11-cu124-2.6.0`
 - `main-base-py3.11-cu124-2.5.1`
 - `main-base-py3.11-cu124-2.4.1`
@@ -50,7 +52,7 @@ Link: [Docker Hub](https://hub.docker.com/r/axolotlai/axolotl)
 # on push to main
 main-py{python_version}-cu{cuda_version}-{pytorch_version}

-# latest main (currently torch 2.5.1, python 3.11, cuda 12.4)
+# latest main (currently torch 2.6.0, python 3.11, cuda 12.4)
 main-latest

 # nightly build
@@ -68,6 +70,7 @@ There may be some extra tags appended to the image, like `-vllm` which installs

 Tags examples:

+- `main-py3.11-cu126-2.7.0`
 - `main-py3.11-cu124-2.6.0`
 - `main-py3.11-cu124-2.5.1`
 - `main-py3.11-cu124-2.4.1`
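For reference, pulling one of these images uses the `axolotlai/axolotl` Docker Hub repository linked above; the tags shown here are just examples from the lists in the doc:

```bash
# Latest image built from main (currently torch 2.6.0, python 3.11, cuda 12.4)
docker pull axolotlai/axolotl:main-latest

# Or pin an exact tag from the examples above
docker pull axolotlai/axolotl:main-py3.11-cu124-2.6.0
```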
examples/llama-3/sparse-finetuning.yaml (new file, 77 lines)

base_model: neuralmagic/Sparse-Llama-3.1-8B-2of4

plugins:
  - axolotl.integrations.llm_compressor.LLMCompressorPlugin

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: tatsu-lab/alpaca
    type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.05
output_dir: ./outputs/out

sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true
eval_sample_packing: false

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 8
micro_batch_size: 1
num_epochs: 1
optimizer: paged_adamw_8bit
lr_scheduler: cosine
learning_rate: 2e-5

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
early_stopping_patience:
resume_from_checkpoint:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 100
evals_per_epoch: 2
eval_table_size:
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
  pad_token: <|end_of_text|>

llmcompressor:
  recipe:
    finetuning_stage:
      finetuning_modifiers:
        ConstantPruningModifier:
          targets: [
            're:.*q_proj.weight',
            're:.*k_proj.weight',
            're:.*v_proj.weight',
            're:.*o_proj.weight',
            're:.*gate_proj.weight',
            're:.*up_proj.weight',
            're:.*down_proj.weight',
          ]
          start: 0
  save_compressed: true
@@ -10,7 +10,6 @@ plugins:
 liger_glu_activation: true
 liger_rms_norm: true
 liger_layer_norm: true
-cut_cross_entropy: true

 llama4_linearized_experts: true  # needed with custom linearized experts model
 load_in_4bit: true
@@ -11,13 +11,13 @@ liger-kernel==0.5.8

 packaging==23.2

-peft==0.15.1
+peft==0.15.2
 transformers==4.51.3
 tokenizers>=0.21.1
 accelerate==1.6.0
 datasets==3.5.0
 deepspeed>=0.15.4
-trl==0.16.1
+trl==0.17.0
 hf_xet==1.0.0
 hqq==0.2.5

setup.py (7 changed lines)

@@ -67,13 +67,13 @@ def parse_requirements(extras_require_map):
     if (major, minor) >= (2, 7):
         _install_requires.pop(_install_requires.index(xformers_version))
         # _install_requires.append("xformers==0.0.29.post3")  # xformers seems to be hard pinned to 2.6.0
-        extras_require_map["vllm"] = ["vllm==0.8.3"]
+        extras_require_map["vllm"] = ["vllm==0.8.4"]
     elif (major, minor) >= (2, 6):
         _install_requires.pop(_install_requires.index(xformers_version))
         _install_requires.append(
             "xformers==0.0.29.post2"
         )  # vllm needs post2 w torch 2.6
-        extras_require_map["vllm"] = ["vllm==0.8.3"]
+        extras_require_map["vllm"] = ["vllm==0.8.4"]
     elif (major, minor) >= (2, 5):
         _install_requires.pop(_install_requires.index(xformers_version))
         if patch == 0:
@@ -149,6 +149,9 @@ extras_require = {
     "vllm": [
         "vllm==0.7.2",
     ],
+    "llmcompressor": [
+        "llmcompressor==0.5.1",
+    ],
 }

 install_requires, dependency_links, extras_require_build = parse_requirements(
@@ -932,9 +932,6 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
             collator = DataCollatorForSeq2Seq

         kwargs["return_tensors"] = "pt"
-        if issubclass(collator, DataCollatorForSeq2Seq):
-            kwargs["sequence_parallel_degree"] = training_args.sequence_parallel_degree
-            kwargs["ring_attn_func"] = training_args.ring_attn_func

         return collator(
             *collator_args,
@@ -1051,6 +1048,9 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
         if self.cfg.rpo_alpha is not None:
             training_args_kwargs["rpo_alpha"] = self.cfg.rpo_alpha

+        if self.cfg.use_wandb:
+            training_args_kwargs["run_name"] = self.cfg.wandb_name
+
         training_args_cls = None
         blocklist_args_kwargs = []
         if self.cfg.rl == "simpo":
@@ -1121,6 +1121,12 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
             **training_args_kwargs,
         )

+        # unset run_name so wandb sets up experiment names
+        if self.cfg.use_wandb and training_args.run_name == training_args.output_dir:
+            training_args.run_name = (  # pylint: disable=attribute-defined-outside-init
+                None
+            )
+
         return training_args

     def build(self, total_num_steps):
@@ -371,13 +371,15 @@ class AxolotlTrainer(
                 num_items_in_batch=num_items_in_batch,
             )

-        return super().compute_loss(
+        loss = super().compute_loss(
             model,
             inputs,
             return_outputs=return_outputs,
             num_items_in_batch=num_items_in_batch,
         )

+        return loss
+
     @staticmethod
     def orpo_concatenate_inputs(inputs, label_pad_token=-100, pad_token=0, device=None):
         concatenated_batch = {}
@@ -135,7 +135,9 @@ class GRPOStrategy:
             try:
                 # use importlib to dynamically load the reward function from the module
                 reward_func_module_name = reward_func_fqn.split(".")[-1]
-                reward_func_module = importlib.import_module(reward_func_fqn.split(".")[-2])
+                reward_func_module = importlib.import_module(
+                    ".".join(reward_func_fqn.split(".")[:-1])
+                )
                 reward_func = getattr(reward_func_module, reward_func_module_name)
                 if not len(inspect.signature(reward_func).parameters) >= 2:
                     raise ValueError(
@@ -6,4 +6,4 @@
 from .optimizer import OptimizerMixin
 from .rng_state_loader import RngLoaderMixin
 from .scheduler import SchedulerMixin
-from .sequence_parallel import SequenceParallelMixin
+from .sequence_parallel import SequenceParallelContextManager, SequenceParallelMixin
@@ -1,16 +1,86 @@
-"""Module for Axolotl trainer sequence parallelism mixin"""
+"""
+Module for Axolotl trainer sequence parallelism mixin and training context manager
+"""

+import functools
 import logging

+import torch
 import torch.distributed as dist
 from datasets import Dataset
+from torch import nn
 from torch.utils.data import DistributedSampler, Sampler
+from torch.utils.hooks import RemovableHandle

-from axolotl.monkeypatch.attention.ring_attn import get_ring_attn_group
+from axolotl.monkeypatch.attention.ring_attn import (
+    RingAttnFunc,
+    get_ring_attn_group,
+    update_ring_attn_params,
+)

 LOG = logging.getLogger(__name__)


+def apply_sequence_parallelism(
+    batch: dict[str, torch.Tensor],
+    local_rank: int,
+    local_world_size: int,
+    ring_attn_func: RingAttnFunc,
+) -> dict[str, torch.Tensor]:
+    """
+    Apply sequence parallelism slicing to a batch.
+
+    Args:
+        batch: Batch dictionary (e.g., input_ids, attention_mask, etc.)
+        local_rank: Local rank in the sequence parallel group
+        local_world_size: World size of the sequence parallel group
+        ring_attn_func: The ring attention function to use
+
+    Returns:
+        Sliced batch dictionary.
+    """
+    # Update ring attention params if needed
+    if batch.get("position_ids") is not None:
+        update_ring_attn_params(position_ids=batch["position_ids"])
+
+    # Slice batch for sequence parallel processing
+    total_seq_len = batch["input_ids"].size(1)
+    for key in batch:
+        if (
+            key in batch
+            and isinstance(batch[key], torch.Tensor)
+            and batch[key].dim() > 1
+            and batch[key].size(1) == total_seq_len
+        ):
+            if ring_attn_func in [
+                RingAttnFunc.VARLEN_LLAMA3,
+                RingAttnFunc.BATCH_RING,
+            ]:
+                # Split in sequential fashion and grab this rank's chunk
+                batch[key] = (
+                    batch[key].chunk(local_world_size, dim=1)[local_rank].contiguous()
+                )
+            elif ring_attn_func is RingAttnFunc.BATCH_ZIGZAG:
+                chunks = batch[key].chunk(2 * local_world_size, dim=1)
+
+                # Take rank's chunk and opposing chunk for zigzag pattern
+                selected_chunks = [
+                    chunks[local_rank],
+                    chunks[2 * local_world_size - local_rank - 1],
+                ]
+                batch[key] = torch.cat(selected_chunks, dim=1).contiguous()
+            elif ring_attn_func is RingAttnFunc.BATCH_STRIPE:
+                # Split into striped data and stack
+                tensor = torch.stack(
+                    batch[key].split(local_world_size, dim=1),
+                    dim=1,
+                ).transpose(1, 2)
+                batch[key] = tensor[:, local_rank].contiguous()
+
+    return batch
+
+
 class SequenceParallelMixin:
     """
     Mixin class for sequence parallelism support in trainers.
@@ -87,3 +157,157 @@ class SequenceParallelMixin:
         return self._create_sequence_parallel_sampler(
             eval_dataset, shuffle=False, is_eval=True
         )
+
+
+class SequenceParallelContextManager:
+    """
+    Context manager for sequence parallelism operations.
+
+    This class provides a context that will automatically apply sequence parallelism
+    during model forward passes using a pre-forward hook, and gather outputs from
+    across the sequence parallelism group using a post-forward hook.
+    """
+
+    def __init__(
+        self,
+        model: nn.Module,
+        sequence_parallel_degree: int,
+        ring_attn_func: RingAttnFunc,
+    ):
+        self.model = model
+        self.sequence_parallel_degree = sequence_parallel_degree
+        self.ring_attn_func = ring_attn_func
+        self.process_group = get_ring_attn_group()
+
+        # Initialize sequence parallel group details
+        self.local_rank = dist.get_rank(self.process_group)
+        self.local_world_size = dist.get_world_size(self.process_group)
+
+        # Will store hook handles for removal
+        self.hook_handles: list[RemovableHandle] = []
+
+        # Create a partially applied version of the apply_sequence_parallelism function
+        # with pre-configured params
+        self.apply_sequence_parallelism = functools.partial(
+            apply_sequence_parallelism,
+            local_rank=self.local_rank,
+            local_world_size=self.local_world_size,
+            ring_attn_func=self.ring_attn_func,
+        )
+
+    def __enter__(self):
+        # Forward pre-hook to apply sequence parallelism
+        def sequence_parallel_pre_hook(_, args, kwargs):
+            # Apply sequence parallelism to kwargs
+            kwargs = self.apply_sequence_parallelism(batch=kwargs)
+            return args, kwargs
+
+        # Forward post-hook to gather outputs
+        def sequence_parallel_post_hook(_, __, output):
+            # Gather the sharded outputs
+            return self.gather_outputs(output)
+
+        # Register both hooks
+        self.hook_handles.append(
+            self.model.register_forward_pre_hook(
+                sequence_parallel_pre_hook, with_kwargs=True
+            )
+        )
+        self.hook_handles.append(
+            self.model.register_forward_hook(sequence_parallel_post_hook)
+        )
+
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        # Remove all hooks
+        for handle in self.hook_handles:
+            handle.remove()
+        self.hook_handles = []
+
+    def gather_outputs(self, output):
+        """Gather sharded outputs from all ranks and reconstruct the full tensor."""
+        # Handle different output formats (dict, tensor, etc.)
+        if isinstance(output, dict):
+            gathered_output = {}
+            for key, value in output.items():
+                if isinstance(value, torch.Tensor) and value.dim() > 1:
+                    # Gather logits or other sequence-sharded tensors
+                    gathered_value = self.gather_tensor(value)
+                    gathered_output[key] = gathered_value
+                else:
+                    gathered_value = value.clone()
+                    dist.all_reduce(
+                        gathered_value, op=dist.ReduceOp.SUM, group=self.process_group
+                    )
+                    gathered_output[key] = gathered_value
+            return gathered_output
+        if isinstance(output, torch.Tensor):
+            return self.gather_tensor(output)
+
+        return output
+
+    def gather_tensor(self, tensor):
+        """Gather a sharded tensor from all ranks."""
+        # Prepare tensors for all_gather
+        world_size = self.local_world_size
+
+        # Create list to store tensors from all ranks
+        gathered_tensors = [torch.zeros_like(tensor) for _ in range(world_size)]
+
+        # All-gather operation
+        dist.all_gather(gathered_tensors, tensor, group=self.process_group)
+
+        # Concatenate along sequence dimension (typically dim=1)
+        if self.ring_attn_func in [RingAttnFunc.VARLEN_LLAMA3, RingAttnFunc.BATCH_RING]:
+            # Simple concatenation for standard sharding
+            return torch.cat(gathered_tensors, dim=1)
+
+        if self.ring_attn_func is RingAttnFunc.BATCH_ZIGZAG:
+            # Each rank has a pattern of (rank, world_size*2-rank-1)
+            reconstituted_tensors = [None] * (world_size * 2)
+
+            # First, split each gathered tensor into its two chunks
+            for rank, gathered_tensor in enumerate(gathered_tensors):
+                # Each tensor contains two chunks in the sequence dimension
+                chunk_size = gathered_tensor.size(1) // 2
+                chunk1, chunk2 = gathered_tensor.split(chunk_size, dim=1)
+
+                # Place chunks in their original positions
+                reconstituted_tensors[rank] = chunk1
+                reconstituted_tensors[world_size * 2 - rank - 1] = chunk2
+
+            # Concatenate the reconstituted tensors in the correct order
+            return torch.cat(reconstituted_tensors, dim=1)
+
+        # Otherwise, RingAttnFunc.BATCH_STRIPE
+        # In striping, each rank has every world_size-th slice
+        batch_size = tensor.size(0)
+        hidden_dim = tensor.size(-1)
+
+        # First, determine the full sequence length
+        total_seq_len = 0
+        for t in gathered_tensors:
+            total_seq_len += t.size(1)
+
+        # Create a tensor to hold the unstriped result
+        result = torch.zeros(
+            batch_size,
+            total_seq_len,
+            hidden_dim,
+            dtype=tensor.dtype,
+            device=tensor.device,
+        )
+
+        # For each rank's tensor, distribute its slices to the correct positions
+        for rank, gathered_tensor in enumerate(gathered_tensors):
+            # The rank's tensor contains every world_size-th slice
+            # starting from its rank position
+            seq_len = gathered_tensor.size(1)
+            for i in range(seq_len):
+                # Calculate the position in the full tensor
+                pos = i * world_size + rank
+                if pos < total_seq_len:
+                    result[:, pos] = gathered_tensor[:, i]
+
+        return result
@@ -27,8 +27,6 @@ pip3 uninstall -y cut-cross-entropy && pip3 install "cut-cross-entropy[transform
 ```yaml
 plugins:
   - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
-
-cut_cross_entropy: true
 ```

 ## Supported Models
@@ -28,7 +28,7 @@ class CutCrossEntropyArgs(BaseModel):
     Input args for Cut Cross Entropy.
     """

-    cut_cross_entropy: Optional[bool] = None
+    cut_cross_entropy: Optional[bool] = True

     @model_validator(mode="before")
     @classmethod
src/axolotl/integrations/llm_compressor/README.md (new file, 108 lines)

# LLMCompressor Integration

Fine-tune sparsified models in Axolotl using Neural Magic's [LLMCompressor](https://github.com/vllm-project/llm-compressor).

This integration enables fine-tuning of models sparsified using LLMCompressor within the Axolotl training framework. By combining LLMCompressor's model compression capabilities with Axolotl's distributed training pipelines, users can efficiently fine-tune sparse models at scale.

It uses Axolotl's plugin system to hook into the fine-tuning flows while maintaining sparsity throughout training.

---

## Requirements

- Axolotl with `llmcompressor` extras:

  ```bash
  pip install "axolotl[llmcompressor]"
  ```

- Requires `llmcompressor >= 0.5.1`

This will install all necessary dependencies to fine-tune sparsified models using the integration.

---

## Usage

To enable sparse fine-tuning with this integration, include the plugin in your Axolotl config:

```yaml
plugins:
  - axolotl.integrations.llm_compressor.LLMCompressorPlugin

llmcompressor:
  recipe:
    finetuning_stage:
      finetuning_modifiers:
        ConstantPruningModifier:
          targets: [
            're:.*q_proj.weight',
            're:.*k_proj.weight',
            're:.*v_proj.weight',
            're:.*o_proj.weight',
            're:.*gate_proj.weight',
            're:.*up_proj.weight',
            're:.*down_proj.weight',
          ]
          start: 0
  save_compressed: true

# ... (other training arguments)
```

This plugin **does not apply pruning or sparsification itself** — it is intended for **fine-tuning models that have already been sparsified**.

Pre-sparsified checkpoints can be:

- Generated using [LLMCompressor](https://github.com/vllm-project/llm-compressor)
- Downloaded from [Neural Magic's Hugging Face page](https://huggingface.co/neuralmagic)
- Any custom LLM with compatible sparsity patterns that you've created yourself

To learn more about writing and customizing LLMCompressor recipes, refer to the official documentation:
[https://github.com/vllm-project/llm-compressor/blob/main/README.md](https://github.com/vllm-project/llm-compressor/blob/main/README.md)

### Storage Optimization with save_compressed

Setting `save_compressed: true` in your configuration enables saving models in a compressed format, which:

- Reduces disk space usage by approximately 40%
- Maintains compatibility with vLLM for accelerated inference
- Maintains compatibility with llmcompressor for further optimization (example: quantization)

This option is highly recommended when working with sparse models to maximize the benefits of model compression.

### Example Config

See [`examples/llama-3/sparse-finetuning.yaml`](examples/llama-3/sparse-finetuning.yaml) for a complete example.
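As a usage sketch (the `axolotl` CLI entry point assumed here is the standard one and is not introduced by this change), sparse fine-tuning with the example config could be launched like so:

```bash
# Install the integration extras, then fine-tune the pre-sparsified model
pip install "axolotl[llmcompressor]"
axolotl train examples/llama-3/sparse-finetuning.yaml
```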
---

## Inference with vLLM

After fine-tuning your sparse model, you can leverage vLLM for efficient inference.
You can also use LLMCompressor to apply additional quantization to your fine-tuned
sparse model before inference for even greater performance benefits:

```python
from vllm import LLM, SamplingParams

prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
llm = LLM("path/to/your/sparse/model")
outputs = llm.generate(prompts, sampling_params)

for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
```

For more details on vLLM's capabilities and advanced configuration options, see the [official vLLM documentation](https://docs.vllm.ai/).

## Learn More

For details on available sparsity and quantization schemes, fine-tuning recipes, and usage examples, visit the official LLMCompressor repository:

[https://github.com/vllm-project/llm-compressor](https://github.com/vllm-project/llm-compressor)
src/axolotl/integrations/llm_compressor/__init__.py (new file, 5 lines)

"""Integration entry point for the LLMCompressor plugin."""

from .plugin import LLMCompressorPlugin

__all__ = ["LLMCompressorPlugin"]
src/axolotl/integrations/llm_compressor/args.py (new file, 40 lines)

"""
LLMCompressor and Sparse Finetuning config models.
"""

from typing import Any

from pydantic import BaseModel, Field
from typing_extensions import Annotated


class CompressionArgs(BaseModel):
    """Sparse Finetuning config for LLMCompressor."""

    # Typing for recipe is set to Any due to:
    # https://github.com/vllm-project/llm-compressor/issues/1319
    recipe: Annotated[
        Any,
        Field(
            description="The recipe containing the compression algorithms and hyperparameters to apply."
        ),
    ]

    save_compressed: Annotated[
        bool,
        Field(
            default=False,
            description="Whether to save the compressed model after training.",
        ),
    ]


class LLMCompressorArgs(BaseModel):
    """LLMCompressor configuration BaseModel."""

    llmcompressor: Annotated[
        CompressionArgs,
        Field(
            description="Arguments enabling compression pathways through the LLM Compressor plugins"
        ),
    ]
src/axolotl/integrations/llm_compressor/plugin.py (new file, 171 lines)

"""
Sparse Finetuning plugin for Axolotl — enables handling of sparse neural networks
by maintaining masks for zero weights during training.
"""

import logging
from functools import wraps
from typing import Any, Callable, Concatenate, ParamSpec, TypeVar

from llmcompressor import active_session, create_session
from llmcompressor.core import callbacks as session_callbacks
from llmcompressor.recipe import Recipe
from torch.nn import Module
from transformers.trainer import Trainer
from transformers.trainer_callback import TrainerCallback, TrainerControl, TrainerState
from transformers.training_args import TrainingArguments

from axolotl.integrations.base import BasePlugin

P = ParamSpec("P")  # Params for generic function signatures
R = TypeVar("R")  # Return type for generic function signatures

LOG = logging.getLogger("axolotl.integrations.llm_compressor")


class LLMCompressorCallbackHandler(TrainerCallback):
    """
    Trainer callback for Sparse Finetuning.
    Maintains sparsity patterns during training by applying masks after optimization steps,
    ensuring zero-weight updates are canceled out.
    """

    def __init__(self, trainer: Trainer, recipe: Any):
        """
        Initialize the Sparse Finetuning callback handler.

        Args:
            trainer (Trainer): Huggingface Trainer instance.
            recipe (Recipe | dict): Sparse finetuning recipe to apply.
        """
        super().__init__()
        self.trainer = trainer
        self.recipe = (
            Recipe.model_validate(recipe) if not isinstance(recipe, Recipe) else recipe
        )
        self.original_compute_loss = trainer.compute_loss
        self.trainer.compute_loss = compute_loss_wrapper(self.trainer.compute_loss)
        create_session()

    def on_train_begin(
        self,
        args: TrainingArguments,
        state: TrainerState,
        control: TrainerControl,
        **kwargs,
    ) -> None:
        """
        Called at the beginning of training. Initializes the compression session.

        Args:
            args (TrainingArguments): Training arguments.
            state (TrainerState): Trainer state.
            control (TrainerControl): Trainer control.
        """
        super().on_train_begin(args, state, control, **kwargs)
        self.trainer.accelerator.wait_for_everyone()
        active_session().initialize(
            model=self.trainer.model,
            optimizer=self.trainer.optimizer,
            start=state.epoch,
            recipe=self.recipe,
        )
        self.trainer.accelerator.wait_for_everyone()

    def on_step_begin(
        self,
        args: TrainingArguments,
        state: TrainerState,
        control: TrainerControl,
        **kwargs,
    ) -> None:
        """
        Called at the beginning of a training step. Triggers batch_start callback.
        """
        super().on_step_begin(args, state, control, **kwargs)
        session_callbacks.batch_start()

    def on_step_end(
        self,
        args: TrainingArguments,
        state: TrainerState,
        control: TrainerControl,
        **kwargs,
    ) -> None:
        """
        Called at the end of a training step. Triggers optimizer and batch_end callbacks.
        """
        super().on_step_end(args, state, control, **kwargs)
        session_callbacks.optim_pre_step()
        session_callbacks.optim_post_step()
        session_callbacks.batch_end()

    def on_train_end(
        self,
        args: TrainingArguments,
        state: TrainerState,
        control: TrainerControl,
        **kwargs,
    ) -> None:
        """
        Called at the end of training. Finalizes the compression session.
        """
        super().on_train_end(args, state, control, **kwargs)
        active_session().finalize()
        self.trainer.compute_loss_func = self.original_compute_loss


class LLMCompressorPlugin(BasePlugin):
    """
    Sparse Finetuning plugin for Axolotl integration.
    """

    def get_input_args(self) -> str:
        """
        Returns the path to the plugin's argument definition.

        Returns:
            str: Dotted path to the LLMCompressorArgs class.
        """
        return "axolotl.integrations.llm_compressor.args.LLMCompressorArgs"

    def add_callbacks_post_trainer(self, cfg: Any, trainer: Trainer) -> list:
        """
        Adds Sparse Finetuning callback to the Trainer instance.

        Args:
            cfg (Any): Configuration object containing the sparse recipe.
            trainer (Trainer): Huggingface Trainer instance.

        Returns:
            list: List containing the configured callback instances.
        """
        LOG.info("Adding Sparse Finetuning callback to the trainer")
        callback = LLMCompressorCallbackHandler(
            trainer=trainer,
            recipe=cfg.llmcompressor.recipe,
        )
        return [callback]


def compute_loss_wrapper(
    compute_loss_func: Callable[Concatenate[Module, P], R],
) -> Callable[Concatenate[Module, P], R]:
    """
    Wraps the loss computation function to trigger the loss_calculated callback.

    Args:
        compute_loss_func (Callable): Original loss computation function.

    Returns:
        Callable: Wrapped function that also invokes the loss_calculated callback.
    """

    @wraps(compute_loss_func)
    def compute_and_notify(model: Module, *args: P.args, **kwargs: P.kwargs) -> R:
        loss = compute_loss_func(model, *args, **kwargs)
        if active_session().lifecycle.initialized_ and model.training:
            session_callbacks.loss_calculated(loss=loss)
        return loss

    return compute_and_notify
src/axolotl/integrations/llm_compressor/utils.py (new file, 40 lines)

"""Utilities for llmcompressor integration with axolotl."""

from typing import Union

from llmcompressor.transformers.sparsification.compressed_tensors_utils import (
    modify_save_pretrained,
)
from transformers import PreTrainedModel, Trainer


def save_compressed_model(
    model: PreTrainedModel,
    output_dir: Union[str, bytes],
    trainer: Trainer,
    safe_serialization: bool = False,
    save_compressed: bool = False,
) -> None:
    """
    Synchronize processes, apply compression hooks, and save the model.

    Args:
        model (PreTrainedModel): The model to be saved.
        output_dir (str or bytes): Path where the model files will be written.
        trainer (Trainer): Hugging Face Trainer for process synchronization.
        safe_serialization (bool): Use safe serialization if True.
        save_compressed (bool): Write compressed tensors if True.
    """
    trainer.accelerator.wait_for_everyone()

    # Only the main process writes the files
    if not trainer.accelerator.is_main_process:
        return

    modify_save_pretrained(model)
    model.save_pretrained(
        output_dir,
        safe_serialization=safe_serialization,
        save_compressed=save_compressed,
        skip_sparsity_compression_stats=not save_compressed,
    )
@@ -6,6 +6,7 @@ import os
 import signal
 import sys
 import weakref
+from contextlib import nullcontext
 from pathlib import Path
 from typing import Any, Dict

@@ -25,6 +26,9 @@ from axolotl.contribs.lgpl import (  # pylint: disable = no-name-in-module
     fix_untrained_tokens,
 )
 from axolotl.core.trainer_builder import HFCausalTrainerBuilder, HFRLTrainerBuilder
+from axolotl.core.trainers.mixins.sequence_parallel import (
+    SequenceParallelContextManager,
+)
 from axolotl.logging_config import configure_logging
 from axolotl.utils.dict import DictDefault
 from axolotl.utils.distributed import cleanup_distributed
@@ -185,16 +189,28 @@ def execute_training(
         trainer: The configured trainer object.
         resume_from_checkpoint: Path to checkpoint to resume from, if applicable.
     """
-    LOG.info("Starting trainer...")
-    if cfg.flash_optimum:
-        with torch.backends.cuda.sdp_kernel(
-            # TODO configure these from the YAML w/ sdp_kernel_kwargs: ...
-            enable_flash=True,
-            enable_math=True,
-            enable_mem_efficient=True,
-        ):
-            trainer.train(resume_from_checkpoint=resume_from_checkpoint)
-    else:
-        trainer.train(resume_from_checkpoint=resume_from_checkpoint)
+    # Define the context managers to use
+    flash_context = (
+        torch.backends.cuda.sdp_kernel(
+            enable_flash=True,
+            enable_math=True,
+            enable_mem_efficient=True,
+        )
+        if cfg.flash_optimum
+        else nullcontext()
+    )
+    sequence_parallel_context = (
+        SequenceParallelContextManager(
+            model=trainer.model,
+            sequence_parallel_degree=cfg.sequence_parallel_degree,
+            ring_attn_func=cfg.ring_attn_func,
+        )
+        if cfg.sequence_parallel_degree > 1
+        else nullcontext()
+    )
+
+    LOG.info("Starting trainer...")
+    with flash_context, sequence_parallel_context:
+        trainer.train(resume_from_checkpoint=resume_from_checkpoint)
@@ -279,8 +295,23 @@ def save_trained_model(
         trainer.model.save_pretrained(
             cfg.output_dir, safe_serialization=safe_serialization
         )

     model.save_pretrained(cfg.output_dir, safe_serialization=safe_serialization)

+    if hasattr(cfg, "llmcompressor") and cfg.llmcompressor:
+        # TODO: add integration support so this can be implemented completely within the plugin
+        from axolotl.integrations.llm_compressor.utils import (
+            save_compressed_model,
+        )
+
+        save_compressed_model(
+            model=model,
+            output_dir=cfg.output_dir,
+            trainer=trainer,
+            safe_serialization=safe_serialization,
+            save_compressed=cfg.llmcompressor.save_compressed,
+        )
+

 def create_model_card(cfg: DictDefault, trainer: Trainer):
     """
@@ -1,20 +1,12 @@
-"""
-Data collators for axolotl to pad labels and position_ids for packed sequences. Also
-includes logic for handling sequence parallelism collation.
-"""
+"""Data collators for axolotl to pad labels and position_ids for packed sequences"""

 from dataclasses import dataclass
 from typing import Any

 import numpy as np
-import torch
-import torch.distributed as dist
 from transformers import PreTrainedTokenizerBase
 from transformers.utils import PaddingStrategy

-from axolotl.monkeypatch.attention.ring_attn import update_ring_attn_params
-from axolotl.monkeypatch.attention.ring_attn.patch import RingAttnFunc
-

 @dataclass
 class DataCollatorForSeq2Seq:
@@ -49,8 +41,6 @@ class DataCollatorForSeq2Seq:
             The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
         return_tensors (`str`):
             The type of Tensor to return. Allowable values are "np", "pt" and "tf".
-        sequence_parallel_degree (`int`):
-            The degree of sequence parallelism. Default to 1 for no sequence parallelism.
     """

     tokenizer: PreTrainedTokenizerBase
@@ -61,17 +51,6 @@ class DataCollatorForSeq2Seq:
     label_pad_token_id: int = -100
     position_pad_token_id: int = 0
     return_tensors: str = "pt"
-    sequence_parallel_degree: int = 1
-    ring_attn_func: RingAttnFunc | None = None
-
-    def __post_init__(self):
-        if self.sequence_parallel_degree > 1:
-            from axolotl.monkeypatch.attention.ring_attn import get_ring_attn_group
-
-            # Get information about our position in the SP group
-            sp_group = get_ring_attn_group()
-            self.local_rank = dist.get_rank(group=sp_group)
-            self.local_world_size = dist.get_world_size(group=sp_group)

     def __call__(self, features, return_tensors=None):
         has_attn_mask = "attention_mask" in features[0].keys()
@@ -141,62 +120,8 @@ class DataCollatorForSeq2Seq:
             )
             features["decoder_input_ids"] = decoder_input_ids

-        if self.sequence_parallel_degree > 1:
-            features = self.apply_sequence_parallelism(features)
-
         return features

-    def apply_sequence_parallelism(
-        self, batch: dict[str, torch.Tensor]
-    ) -> torch.Tensor:
-        """
-        Apply sequence parallelism slicing to a batch.
-
-        Args:
-            batch: Batch dictionary from parent collator.
-
-        Returns:
-            Sliced batch dictionary.
-        """
-        # Get local (start, end) for sequence parallelism slicing
-        total_seq_len = batch["input_ids"].size(1)
-
-        # Update params for varlen ring attention calculation
-        if batch.get("position_ids") is not None:
-            update_ring_attn_params(position_ids=batch["position_ids"])
-
-        # Slice batch for sequence parallel processing
-        for key in batch:
-            if batch[key].size(1) == total_seq_len:
-                if self.ring_attn_func in [
-                    RingAttnFunc.VARLEN_LLAMA3,
-                    RingAttnFunc.BATCH_RING,
-                ]:
-                    batch[key] = (
-                        batch[key]
-                        .chunk(self.local_world_size, dim=1)[self.local_rank]
-                        .contiguous()
-                    )
-                elif self.ring_attn_func is RingAttnFunc.BATCH_ZIGZAG:
-                    chunks = batch[key].chunk(2 * self.local_world_size, dim=1)
-
-                    # Take rank's chunk and opposing chunk for zigzag pattern
-                    selected_chunks = [
-                        chunks[self.local_rank],
-                        chunks[2 * self.local_world_size - self.local_rank - 1],
-                    ]
-                    batch[key] = torch.cat(selected_chunks, dim=1).contiguous()
-                elif self.ring_attn_func is RingAttnFunc.BATCH_STRIPE:
-                    # TODO(djsaunde): This doesn't seem to work as expected
-                    # Split into striped data and stack
-                    tensor = torch.stack(
-                        batch[key].split(self.local_world_size, dim=1),
-                        dim=1,
-                    ).transpose(1, 2)
-                    batch[key] = tensor[:, self.local_rank].contiguous()
-
-        return batch
-

 @dataclass
 class BatchSamplerDataCollatorForSeq2Seq(DataCollatorForSeq2Seq):
@@ -126,9 +126,6 @@ def normalize_config(cfg):
         with open(ds_config_path, encoding="utf-8") as f:
             cfg.deepspeed = json.load(f)

-    if cfg.sequence_parallel_degree is None:
-        cfg.sequence_parallel_degree = 1
-
     if cfg.saves_per_epoch:
         save_steps = 1.0 / (cfg.saves_per_epoch * cfg.num_epochs)
         if save_steps < 1.0:  # prevent saves on every step
@@ -134,10 +134,9 @@ def prepare_dataset(cfg, tokenizer, processor=None, preprocess_iterable=None):
                 "csv", data_files=f.name, split="train", streaming=True
             )
         else:
-            if is_local_main_process():
-                iter_ds = load_dataset(
-                    path, streaming=True, split=split, name=name, data_files=data_files
-                )
+            iter_ds = load_dataset(
+                path, streaming=True, split=split, name=name, data_files=data_files
+            )

     if skip:
         LOG.info(f"Skipping {skip} samples from the dataset")
@@ -1,5 +1,7 @@
 """custom checkpointing utils"""

+from functools import partial
+
 from axolotl.utils.gradient_checkpointing.unsloth import (
     Unsloth_Offloaded_Gradient_Checkpointer,
 )
@@ -9,6 +11,10 @@ def hf_grad_checkpoint_offload_wrapper(
     decoder_layer, *args, use_reentrant=None
 ):  # pylint: disable=unused-argument
     return Unsloth_Offloaded_Gradient_Checkpointer.apply(
-        decoder_layer.__self__,
+        (
+            decoder_layer.func.__self__
+            if isinstance(decoder_layer, partial)
+            else decoder_layer.__self__
+        ),
         *args,
     )
@@ -139,6 +139,22 @@ def check_model_config(cfg: DictDefault, model_config: PretrainedConfig):
         hasattr(model_config, "quantization_config")
         and model_config.quantization_config
     )
+
+    # Detect compressed-tensors config
+    is_compressed_tensors_config = (
+        quant_config_exists
+        and model_config.quantization_config.get("quant_method") == "compressed-tensors"
+    )
+
+    if is_compressed_tensors_config:
+        if model_config.quantization_config.get("config_groups"):
+            LOG.warning(
+                "Found `config_groups` in a compressed-tensors config. "
+                "QAT integration with llmcompressor is not tested."
+            )
+        # Skip further quant checks for compressed-tensors
+        return
+
     quant_config_method_is_gptq = (
         quant_config_exists
         and "quant_method" in model_config.quantization_config
@@ -1149,22 +1149,17 @@ class AxolotlInputConfig(
 
         return data
 
-    @field_validator("sequence_parallel_degree", mode="after")
-    @classmethod
-    def check_sequence_parallel_degree(cls, value, info):
-        if not value:
-            value = 1
-
-        if value > 1:
-            if not info.data.get("flash_attention"):
+    @model_validator(mode="after")
+    def check_sequence_parallel_degree(self):
+        if not self.sequence_parallel_degree:
+            self.sequence_parallel_degree = 1
+        elif self.sequence_parallel_degree > 1:
+            if not self.flash_attention:
                 raise ValueError(
                     "flash_attention: true must be set with sequence_parallel_degree > 1"
                 )
 
-            if (
-                info.data.get("sample_packing")
-                and not info.data["micro_batch_size"] == 1
-            ):
+            if self.sample_packing and self.micro_batch_size > 1:
                 raise ValueError(
                     "micro_batch_size must be set to 1 when sample_packing is enabled"
                     "due to a `ring-flash-attn` requirement"
@@ -1184,42 +1179,40 @@ class AxolotlInputConfig(
             # according to the proportion of non-padding tokens per rank.
             LOG.warning(
                 "Sequence parallelism (SP) is enabled with "
-                f"sequence_parallel_degree={value}. Please note that logged losses may "
-                "differ slightly to the non-SP losses due to transformers Trainer "
-                "implementation details. Please see "
-                "https://github.com/axolotl-ai-cloud/axolotl/pull/2495#issuecomment-2784022042 "
+                f"sequence_parallel_degree={self.sequence_parallel_degree}. "
+                "Please note that logged losses may differ slightly to the non-SP "
+                "losses due to transformers Trainer implementation details. "
+                "Please see https://github.com/axolotl-ai-cloud/axolotl/pull/2495#issuecomment-2784022042 "
                 "for more details."
             )
 
-        return value
+        return self
 
-    @field_validator("ring_attn_func", mode="after")
-    @classmethod
-    def check_ring_attn_func(cls, value, info):
-        if not info.data.get("sequence_parallel_degree", 1) > 1:
-            return value
+    @model_validator(mode="after")
+    def validate_ring_attn_func(self):
+        if getattr(self, "sequence_parallel_degree", 1) == 1:
+            return self
 
         from axolotl.monkeypatch.attention.ring_attn.patch import RingAttnFunc
 
-        if value is not None:
-            # Set the ring attention function if passed in config
+        if self.ring_attn_func is not None:
             valid_funcs = list(RingAttnFunc)
-            if value in valid_funcs:
-                value = RingAttnFunc(value)
+            if self.ring_attn_func in valid_funcs:
+                self.ring_attn_func = RingAttnFunc(self.ring_attn_func)
             else:
                 raise ValueError(
-                    f"ring_attn_func: {value} must be one of {valid_funcs}"
+                    f"ring_attn_func: {self.ring_attn_func} must be in {valid_funcs}"
                 )
         else:
             # Default ring attention function selection
-            sample_packing = info.data.get("sample_packing")
-            value = (
+            sample_packing = getattr(self, "sample_packing", False)
+            self.ring_attn_func = (
                 RingAttnFunc.VARLEN_LLAMA3
                 if sample_packing
                 else RingAttnFunc.BATCH_RING
             )
 
-        return value
+        return self
 
     @model_validator(mode="before")
     @classmethod
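The validators above move from pydantic's `@field_validator` to `@model_validator(mode="after")`, which lets one validator read and mutate several fields at once. A self-contained sketch of that pattern (illustrative only, not the project's actual schema):

from pydantic import BaseModel, model_validator


class SPConfig(BaseModel):
    sequence_parallel_degree: int | None = None
    flash_attention: bool = False

    @model_validator(mode="after")
    def check_sp(self):
        # runs after all fields are parsed; may read and mutate any of them
        if not self.sequence_parallel_degree:
            self.sequence_parallel_degree = 1
        elif not self.flash_attention:
            raise ValueError("flash_attention must be true when sequence_parallel_degree > 1")
        return self


assert SPConfig().sequence_parallel_degree == 1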
@@ -348,7 +348,7 @@ def process_datasets_for_packing(cfg, train_dataset, eval_dataset):
             load_from_cache_file=not cfg.is_preprocess,
             desc="Add position_id column (PoSE)",
         )
-    elif cfg.sample_packing or cfg.sequence_parallel_degree > 1:
+    elif cfg.sample_packing:
         drop_long_kwargs = {}
         if filter_map_kwargs:
             drop_long_kwargs["desc"] = "Add position_id column (Sample Packing)"
@@ -358,7 +358,7 @@ def process_datasets_for_packing(cfg, train_dataset, eval_dataset):
             **filter_map_kwargs,
             **drop_long_kwargs,
         )
-    if cfg.eval_sample_packing or cfg.sequence_parallel_degree > 1:
+    if cfg.eval_sample_packing:
         if eval_dataset:
             eval_dataset = eval_dataset.map(
                 add_position_ids,
@@ -528,6 +528,13 @@ def setup_torch_compile_env(cfg):
 def setup_deepspeed_env(cfg, stage=None):
     from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig
 
+    from axolotl.utils.distributed import distributed_state
+
+    if distributed_state and distributed_state.initialized:
+        raise RuntimeError(
+            "Distributed State already initialized before Deepspeed setup"
+        )
+
     os.environ["ACCELERATE_USE_DEEPSPEED"] = "true"
     os.environ["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = cfg.deepspeed
     if stage:
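The new guard raises when Accelerate's distributed state already exists, presumably because the environment variables below are only picked up when that state is first created. A hedged sketch of the intended ordering (the config path is just an example):

import os

from accelerate import PartialState

os.environ["ACCELERATE_USE_DEEPSPEED"] = "true"
os.environ["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = "deepspeed_configs/zero2.json"

state = PartialState()  # create the distributed state only after the env is set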
106  tests/e2e/integrations/test_llm_compressor.py  Normal file
@@ -0,0 +1,106 @@
"""
E2E smoke tests for LLMCompressorPlugin integration
"""

from pathlib import Path

import pytest

from axolotl.cli.args import TrainerCliArgs
from axolotl.common.datasets import load_datasets
from axolotl.train import train
from axolotl.utils.config import normalize_config, prepare_plugins, validate_config
from axolotl.utils.dict import DictDefault

from tests.e2e.utils import (
    check_model_output_exists,
    require_llmcompressor,
    require_torch_2_4_1,
)

MODELS = [
    "nm-testing/llama2.c-stories42M-pruned2.4-compressed",
    "nm-testing/llama2.c-stories42M-gsm8k-sparse-only-compressed",
]


@pytest.mark.parametrize(
    "base_model", MODELS, ids=["no-checkpoint-recipe", "with-checkpoint-recipe"]
)
@pytest.mark.parametrize(
    "save_compressed", [True, False], ids=["save_compressed", "save_uncompressed"]
)
@require_llmcompressor
class TestLLMCompressorIntegration:
    """
    e2e tests for axolotl.integrations.llm_compressor.LLMCompressorPlugin
    """

    @require_torch_2_4_1
    def test_llmcompressor_plugin(
        self, temp_dir, base_model: str, save_compressed: bool
    ):
        # core cfg
        cfg = DictDefault(
            {
                "base_model": base_model,
                "plugins": ["axolotl.integrations.llm_compressor.LLMCompressorPlugin"],
                "sequence_len": 1024,
                "val_set_size": 0.05,
                "special_tokens": {"pad_token": "<|endoftext|>"},
                "datasets": [{"path": "mhenrichsen/alpaca_2k_test", "type": "alpaca"}],
                "num_epochs": 1,
                "micro_batch_size": 2,
                "gradient_accumulation_steps": 2,
                "output_dir": temp_dir,
                "learning_rate": 1e-5,
                "optimizer": "adamw_torch_fused",
                "lr_scheduler": "cosine",
                "save_safetensors": True,
                "bf16": "auto",
                "max_steps": 5,
                "llmcompressor": {
                    "recipe": {
                        "finetuning_stage": {
                            "finetuning_modifiers": {
                                "ConstantPruningModifier": {
                                    "targets": [
                                        "re:.*q_proj.weight",
                                        "re:.*k_proj.weight",
                                        "re:.*v_proj.weight",
                                        "re:.*o_proj.weight",
                                        "re:.*gate_proj.weight",
                                        "re:.*up_proj.weight",
                                        "re:.*down_proj.weight",
                                    ],
                                    "start": 0,
                                },
                            },
                        },
                    },
                    "save_compressed": save_compressed,
                },
            }
        )

        prepare_plugins(cfg)
        cfg = validate_config(cfg)
        normalize_config(cfg)
        cli_args = TrainerCliArgs()
        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

        train(cfg=cfg, dataset_meta=dataset_meta)
        check_model_output_exists(temp_dir, cfg)
        _check_llmcompressor_model_outputs(temp_dir, save_compressed)


def _check_llmcompressor_model_outputs(temp_dir, save_compressed):
    if save_compressed:
        assert (Path(temp_dir) / "recipe.yaml").exists()

        from compressed_tensors import ModelCompressor
        from compressed_tensors.config import Sparse24BitMaskConfig

        compressor = ModelCompressor.from_pretrained(temp_dir)
        assert compressor is not None
        assert isinstance(compressor.sparsity_config, Sparse24BitMaskConfig)
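A short sketch (not in the diff) of inspecting a checkpoint the new test writes with save_compressed=True, using the same compressed_tensors calls as the assertions above; the output path is an example:

from compressed_tensors import ModelCompressor

compressor = ModelCompressor.from_pretrained("./outputs/llmcompressor-smoke")  # example path
if compressor is not None:
    # for the 2:4 sparse test models this is a Sparse24BitMaskConfig
    print(type(compressor.sparsity_config).__name__)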
@@ -4,11 +4,14 @@ GRPO test suite
 
 import os
 import random
+import shutil
 import subprocess  # nosec B404
 import sys
+import tempfile
 import time
 from pathlib import Path
 
+import psutil
 import pytest
 import requests
 import yaml
@@ -21,8 +24,8 @@ from tests.e2e.utils import require_vllm
 
 
 def start_vllm(
-    model: str, env: dict | None = None, wait: int | None = None, quiet=False, **kwargs
-) -> int:
+    model: str, env: dict, wait: int | None = None, quiet=False, **kwargs
+) -> subprocess.Popen:
     """
     helper function to start the VLLM server in the background, mostly for testing purposes
     """
@@ -46,10 +49,41 @@ def start_vllm(
     # print out the command to be executed
     print(" ".join(cmd))
 
+    vllm_logging_json = Path(tempfile.mkdtemp()) / "vllm_logging.json"
+    with open(vllm_logging_json, "w", encoding="utf-8") as temp_file:
+        temp_file.write(
+            """{
+    "formatters": {
+        "json": {
+            "class": "pythonjsonlogger.jsonlogger.JsonFormatter"
+        }
+    },
+    "handlers": {
+        "file": {
+            "class": "logging.FileHandler",
+            "formatter": "json",
+            "level": "DEBUG",
+            "filename": "/tmp/vllm.log",
+            "mode": "a"
+        }
+    },
+    "loggers": {
+        "vllm": {
+            "handlers": ["file"],
+            "level": "DEBUG",
+            "propagate": false
+        }
+    },
+    "version": 1
+}"""
+        )
+
+    cmd_env = env.copy()
+    cmd_env.update({"VLLM_LOGGING_CONFIG_PATH": vllm_logging_json})
     # start `trl vllm-serve` command in the background and capture the process id
     process = subprocess.Popen(  # pylint: disable=consider-using-with
         cmd,
-        env=env,
+        env=cmd_env,
         stdout=subprocess.DEVNULL if quiet else subprocess.PIPE,
         stderr=subprocess.DEVNULL if quiet else subprocess.PIPE,
     )  # nosec B603
@@ -58,32 +92,51 @@ def start_vllm(
     print(f"VLLM server process started (PID: {process.pid})")
 
     # wait until the http server is ready, even if it 404s, but timeout after 60 seconds
+    period_seconds = 5
     started = False
     if wait and host and port:
-        for _ in range(int(wait)):
+        for i in range(0, int(wait), period_seconds):
             try:
                 response = requests.get(f"http://{host}:{port}", timeout=1)
+                print(f"{i}: VLLM server (status: {response.status_code})")
                 if int(response.status_code) in [200, 404]:
                     started = True
                     break
-            except requests.exceptions.RequestException:
-                pass
+            except requests.exceptions.RequestException as exc:
+                print(f"{i}: VLLM server failed to start: {str(exc)}")
 
             # also check if the process.pid is still running
             if not process.poll() is None:
                 break
 
-            time.sleep(1)
+            time.sleep(period_seconds)
 
     if wait and not started:
         print(
             f"VLLM server process did not start within {wait} seconds. Please check your server logs."
         )
-        process.kill()
+        recursive_kill(process)
+        with open("/tmp/vllm.log", "r", encoding="utf-8") as log_file:
+            print(log_file.read())
+        shutil.rmtree("/tmp/vllm.log")
        raise RuntimeError(f"VLLM server process did not start within {wait} seconds.")
 
-    # return the process id
-    return process.pid
+    # return the process
+    return process
+
+
+def recursive_kill(process: subprocess.Popen):
+    """
+    Recursively kill a process and its children
+    """
+    process = psutil.Process(process.pid)
+    for child in psutil.Process(process.pid).children(recursive=True):
+        child.terminate()
+        child.kill()
+        os.kill(child.pid, 9)
+    process.terminate()
+    process.kill()
+    os.kill(process.pid, 9)
 
 
 class TestGRPO:
@@ -174,16 +227,17 @@ def oai_gsm8k_transform(cfg, *args, **kwargs):
 
         current_env = os.environ.copy()
         env = {
-            "NCCL_P2P_LEVEL": "LOC",
+            "NCCL_P2P_LEVEL": "NVL",
             **current_env,
             "CUDA_VISIBLE_DEVICES": "1",
-            "VLLM_USE_V1": "0",
+            "VLLM_DISABLE_COMPILE_CACHE": "1",
+            # "VLLM_USE_V1": "0",
         }
-        vllm_process_id = start_vllm(
+        vllm_process = start_vllm(
             cfg.base_model,
             env=env,
             quiet=True,
-            wait=120,
+            wait=300,
             gpu_memory_utilization=0.15,
             max_model_len=cfg.vllm.max_model_len,
             enable_prefix_caching=cfg.vllm.enable_prefix_caching,
@@ -202,10 +256,14 @@ def oai_gsm8k_transform(cfg, *args, **kwargs):
                     "--main-process-port",
                     f"{get_torch_dist_unique_port()}",
                 ],
-                env={"NCCL_P2P_LEVEL": "LOC", "NCCL_DEBUG": "INFO", **current_env},
+                env={
+                    "NCCL_P2P_LEVEL": "NVL",
+                    "NCCL_DEBUG": "INFO",
+                    **current_env,
+                },
             )
         finally:
-            os.kill(vllm_process_id, 9)
+            recursive_kill(vllm_process)
 
     @pytest.mark.parametrize(
         "num_gpus",
@@ -262,16 +320,17 @@ def oai_gsm8k_transform(cfg, *args, **kwargs):
 
         current_env = os.environ.copy()
         env = {
-            "NCCL_P2P_LEVEL": "LOC",  # nccl can be brittle, assume P2P isn't reliable
+            "NCCL_P2P_LEVEL": "NVL",  # nccl can be brittle, assume P2P isn't reliable
             **current_env,
             "CUDA_VISIBLE_DEVICES": "1",
-            "VLLM_USE_V1": "0",
+            "VLLM_DISABLE_COMPILE_CACHE": "1",
+            # "VLLM_USE_V1": "0",
         }
-        vllm_process_id = start_vllm(
+        vllm_process = start_vllm(
             cfg.base_model,
             env=env,
             quiet=True,
-            wait=120,
+            wait=300,
             gpu_memory_utilization=0.15,
             max_model_len=cfg.vllm.max_model_len,
             enable_prefix_caching=cfg.vllm.enable_prefix_caching,
@@ -290,7 +349,11 @@ def oai_gsm8k_transform(cfg, *args, **kwargs):
                     "--main-process-port",
                     f"{get_torch_dist_unique_port()}",
                 ],
-                env={"NCCL_P2P_LEVEL": "LOC", "NCCL_DEBUG": "INFO", **current_env},
+                env={
+                    "NCCL_P2P_LEVEL": "NVL",
+                    "NCCL_DEBUG": "INFO",
+                    **current_env,
+                },
             )
         finally:
-            os.kill(vllm_process_id, 9)
+            recursive_kill(vllm_process)
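recursive_kill above walks the process tree with psutil before killing the parent, so vLLM worker processes do not outlive the test. A minimal sketch of that pattern under the same assumption (psutil installed); the sleep command stands in for `trl vllm-serve`:

import subprocess

import psutil

proc = subprocess.Popen(["sleep", "300"])  # stand-in for the vLLM server process
parent = psutil.Process(proc.pid)
for child in parent.children(recursive=True):
    child.terminate()  # children first, then the parent
proc.terminate()
proc.wait()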
77  tests/e2e/patched/test_activation_checkpointing.py  Normal file
@@ -0,0 +1,77 @@
"""
E2E tests for activation checkpointing
"""

import pytest
import transformers
from torch.utils.checkpoint import checkpoint

from axolotl.cli.args import TrainerCliArgs
from axolotl.common.datasets import load_datasets
from axolotl.train import train
from axolotl.utils.config import normalize_config, validate_config
from axolotl.utils.dict import DictDefault

from ..utils import check_model_output_exists


@pytest.fixture()
def fix_checkpoint_after_test():
    yield
    transformers.modeling_utils.checkpoint = checkpoint


class TestActivationCheckpointing:
    """
    E2E tests for activation checkpointing
    """

    def test_activation_checkpointing_offload(
        self,
        temp_dir,
        fix_checkpoint_after_test,  # pylint: disable=unused-argument,redefined-outer-name
    ):
        # pylint: disable=duplicate-code
        cfg = DictDefault(
            {
                "base_model": "HuggingFaceTB/SmolLM2-135M",
                "sequence_len": 1024,
                "val_set_size": 0.0,
                "special_tokens": {
                    "pad_token": "<|endoftext|>",
                    "eos_token": "<|im_end|>",
                },
                "datasets": [
                    {
                        "chat_template": "chatml",
                        "path": "mlabonne/FineTome-100k",
                        "type": "chat_template",
                        "split": "train[:10%]",
                        "field_messages": "conversations",
                        "message_field_role": "from",
                        "message_field_content": "value",
                    },
                ],
                "num_epochs": 1,
                "max_steps": 5,
                "micro_batch_size": 1,
                "gradient_accumulation_steps": 1,
                "output_dir": temp_dir,
                "learning_rate": 0.00001,
                "optimizer": "adamw_8bit",
                "lr_scheduler": "cosine",
                "flash_attention": True,
                "sample_packing": True,
                "bf16": True,
                "save_safetensors": True,
                "gradient_checkpointing": "offload",
            }
        )

        cfg = validate_config(cfg)
        normalize_config(cfg)
        cli_args = TrainerCliArgs()
        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

        train(cfg=cfg, dataset_meta=dataset_meta)
        check_model_output_exists(temp_dir, cfg)
@@ -99,6 +99,7 @@ class TestMixtral(unittest.TestCase):
                 "bf16": "auto",
             }
         )
+        cfg = validate_config(cfg)
         normalize_config(cfg)
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
@@ -2,14 +2,19 @@
 
 # pylint: disable=redefined-outer-name,unused-argument
 
+import functools
+import sys
 from unittest.mock import MagicMock, patch
 
 import pytest
 import torch
 from accelerate.state import PartialState
 
+from axolotl.core.trainers.mixins.sequence_parallel import apply_sequence_parallelism
 from axolotl.monkeypatch.attention.ring_attn import (
+    RingAttnFunc,
     get_ring_attn_group,
+    register_ring_attn,
     set_ring_attn_group,
 )
 from axolotl.utils.dict import DictDefault
@@ -47,6 +52,27 @@ def fixture_cfg():
     return cfg
 
 
+@pytest.fixture
+def sequence_parallel_batch():
+    """Create a test batch for sequence parallelism tests."""
+    batch_size = 1
+    seq_len = 8
+
+    # Create test tensors
+    input_ids = torch.arange(batch_size * seq_len).reshape(batch_size, seq_len)
+    attention_mask = torch.ones(batch_size, seq_len)
+    position_ids = torch.arange(seq_len).expand(batch_size, seq_len)
+
+    # Create test batch
+    batch = {
+        "input_ids": input_ids,
+        "attention_mask": attention_mask,
+        "position_ids": position_ids,
+    }
+
+    return batch
+
+
 class TestRingAttention:
     """Tests for the ring attention functionality."""
 
@@ -73,11 +99,6 @@ class TestRingAttention:
         self, mock_world_size, mock_rank, mock_new_group, partial_state
     ):
         """Test that ring attention groups are created correctly."""
-        from axolotl.monkeypatch.attention.ring_attn import (
-            RingAttnFunc,
-            register_ring_attn,
-        )
-
         # Setup mocks
         mock_world_size.return_value = 8  # 8 GPUs total
         mock_rank.return_value = 3  # GPU #3
@@ -101,88 +122,303 @@ class TestRingAttention:
         set_ring_attn_group(None)
 
 
-# Mock a simplified DataCollator test
-@patch("axolotl.monkeypatch.attention.ring_attn.get_ring_attn_group")
-@patch("torch.distributed.get_rank")
-@patch("torch.distributed.get_world_size")
-def test_sequence_parallel_slicing(
-    mock_world_size, mock_rank, mock_get_group, partial_state
-):
-    """Test the basic sequence slicing logic without full collator instantiation."""
-    # Setup mocks
-    mock_get_group.return_value = MagicMock()
-    mock_rank.return_value = 1  # Second GPU
-    mock_world_size.return_value = 4  # 4 GPUs total
-
-    # Create a sample batch
-    batch = {
-        "input_ids": torch.tensor(
-            [
-                [101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112],
-                [201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212],
-            ]
-        ),
-        "attention_mask": torch.ones(2, 12),
-    }
-
-    # Simplified slicing logic from SequenceParallelDataCollator
-    def slice_batch(batch, rank, world_size):
-        result = {}
-        for key in batch:
-            seq_len = batch[key].shape[1]
-            slice_size = seq_len // world_size
-            start_idx = rank * slice_size
-            end_idx = start_idx + slice_size if rank < world_size - 1 else seq_len
-            result[key] = batch[key][:, start_idx:end_idx]
-        return result
-
-    # Slice the batch
-    result = slice_batch(
-        batch, rank=mock_rank.return_value, world_size=mock_world_size.return_value
-    )
-
-    # Check slicing
-    assert result["input_ids"].shape == (2, 3)  # 12 tokens / 4 GPUs = 3 tokens per GPU
-    expected_input_ids = torch.tensor(
-        [
-            [104, 105, 106],  # Second slice of first sequence
-            [204, 205, 206],  # Second slice of second sequence
-        ]
-    )
-    assert torch.all(result["input_ids"] == expected_input_ids)
-
-
-@patch.dict("sys.modules", {"ring_flash_attn": MagicMock()})
-def test_config_validation_with_valid_inputs(cfg):
-    """Test that valid sequence parallelism configurations pass validation."""
-    # Import the actual model class with appropriate mocks
-    from axolotl.utils.schemas.config import AxolotlInputConfig
-
-    # Valid configuration: sequence_parallel_degree > 1 and flash_attention is True
-    cfg = cfg | {
-        "sequence_parallel_degree": 2,
-        "flash_attention": True,
-    }
-
-    # Should validate without errors
-    config = AxolotlInputConfig(**cfg)
-    assert config.sequence_parallel_degree == 2
-    assert config.flash_attention is True
-
-
-def test_config_validation_with_invalid_inputs(cfg):
-    """Test that invalid sequence parallelism configurations fail validation."""
-    from axolotl.utils.schemas.config import AxolotlInputConfig
-
-    # Invalid configuration: sequence_parallel_degree > 1 but flash_attention is False
-    cfg = cfg | {
-        "sequence_parallel_degree": 2,
-        "flash_attention": False,
-    }
-
-    # Should raise ValidationError
-    with pytest.raises(ValueError) as excinfo:
-        AxolotlInputConfig(**cfg)
-
-    # Verify error message
-    assert "flash_attention: true must be set" in str(excinfo.value)
+class TestConfigValidation:
+    """Tests for validating sequence parallelism configurations."""
+
+    @pytest.fixture(autouse=True)
+    def setup_mocks(self, monkeypatch):
+        """Set up mocks for all tests in this class."""
+        # Mock the ring_flash_attn module
+        monkeypatch.setitem(sys.modules, "ring_flash_attn", MagicMock())
+
+    @pytest.fixture
+    def base_cfg(self):
+        """Create a base configuration for testing."""
+        return DictDefault(
+            {
+                "base_model": "HuggingFaceTB/SmolLM2-135M",
+                "datasets": [{"path": "mhenrichsen/alpaca_2k_test", "type": "alpaca"}],
+                "micro_batch_size": 1,
+                "gradient_accumulation_steps": 1,
+                "learning_rate": 1e-3,
+                "output_dir": "./model-out",
+                "sequence_len": 512,
+                "special_tokens": {"pad_token": "<|endoftext|>"},
+            }
+        )
+
+    @pytest.mark.parametrize(
+        "config_updates, expected_values, should_pass, error_msg",
+        [
+            # Valid configuration
+            (
+                {"sequence_parallel_degree": 2, "flash_attention": True},
+                {"sequence_parallel_degree": 2, "flash_attention": True},
+                True,
+                None,
+            ),
+            # Default sequence_parallel_degree
+            ({}, {"sequence_parallel_degree": 1}, True, None),
+            # Invalid: sequence_parallel_degree > 1 without flash_attention
+            (
+                {"sequence_parallel_degree": 2, "flash_attention": False},
+                None,
+                False,
+                "flash_attention: true must be set",
+            ),
+            # Invalid: sequence_parallel_degree > 1 with sample_packing and micro_batch_size > 1
+            (
+                {
+                    "sequence_parallel_degree": 2,
+                    "flash_attention": True,
+                    "sample_packing": True,
+                    "micro_batch_size": 2,
+                    "pad_to_sequence_len": True,
+                },
+                None,
+                False,
+                "micro_batch_size must be set to 1",
+            ),
+        ],
+        ids=[
+            "valid_config",
+            "default_sp_degree",
+            "without_flash_attention",
+            "sample_packing_with_large_batch",
+        ],
+    )
+    def test_sequence_parallel_config_validation(
+        self, base_cfg, config_updates, expected_values, should_pass, error_msg
+    ):
+        """Test various sequence parallelism configuration scenarios."""
+        from axolotl.utils.schemas.config import AxolotlInputConfig
+
+        # Apply updates to base config
+        cfg = base_cfg
+        cfg.update(config_updates)
+
+        if should_pass:
+            # Should validate without errors
+            config = AxolotlInputConfig(**cfg)
+
+            # Check expected values
+            for key, value in expected_values.items():
+                assert getattr(config, key) == value
+        else:
+            # Should raise exception
+            with pytest.raises(ValueError) as excinfo:
+                AxolotlInputConfig(**cfg)
+            assert error_msg in str(excinfo.value)
+
+    @pytest.mark.parametrize(
+        "ring_attn_func, sample_packing, expected_func",
+        [
+            (None, True, RingAttnFunc.VARLEN_LLAMA3),
+            (None, False, RingAttnFunc.BATCH_RING),
+        ],
+        ids=["default_with_sample_packing", "default_without_sample_packing"],
+    )
+    def test_ring_attn_func_validation(
+        self, base_cfg, ring_attn_func, sample_packing, expected_func
+    ):
+        """Test ring_attn_func validation and defaults."""
+        from axolotl.utils.schemas.config import AxolotlInputConfig
+
+        # Apply updates to base config
+        cfg = base_cfg | {
+            "sequence_parallel_degree": 2,
+            "flash_attention": True,
+            "sample_packing": sample_packing,
+        }
+
+        if ring_attn_func is not None:
+            cfg["ring_attn_func"] = ring_attn_func
+
+        # Should validate without errors
+        config = AxolotlInputConfig(**cfg)
+
+        # Check ring_attn_func value
+        assert config.ring_attn_func.value == expected_func
+
+    def test_invalid_ring_attn_func(self, base_cfg):
+        """Test that an invalid ring_attn_func is rejected."""
+        from axolotl.utils.schemas.config import AxolotlInputConfig
+
+        # Invalid configuration with invalid ring_attn_func
+        cfg = base_cfg | {
+            "sequence_parallel_degree": 2,
+            "flash_attention": True,
+            "ring_attn_func": "INVALID_FUNC",
+        }
+
+        # Should raise ValidationError
+        with pytest.raises(ValueError) as excinfo:
+            AxolotlInputConfig(**cfg)
+
+        # Verify error message
+        assert "ring_attn_func: INVALID_FUNC must be in" in str(excinfo.value)
+
+
+class TestApplySequenceParallelism:
+    """Tests for the apply_sequence_parallelism function."""
+
+    @pytest.fixture(autouse=True)
+    def mock_distributed(self, monkeypatch):
+        """Mock torch.distributed functions for testing."""
+        # Mock is_initialized to return True
+        monkeypatch.setattr(torch.distributed, "is_initialized", lambda: True)
+
+        # Mock get_rank to return 0 by default
+        monkeypatch.setattr(torch.distributed, "get_rank", lambda *args, **kwargs: 0)
+
+        # Mock get_world_size to return 2 by default
+        monkeypatch.setattr(
+            torch.distributed, "get_world_size", lambda *args, **kwargs: 2
+        )
+
+        # Mock the process group
+        monkeypatch.setattr(
+            "axolotl.monkeypatch.attention.ring_attn.get_ring_attn_group",
+            MagicMock,
+        )
+
+        # Mock update_ring_attn_params
+        monkeypatch.setattr(
+            "axolotl.monkeypatch.attention.ring_attn.update_ring_attn_params",
+            lambda **kwargs: None,
+        )
+
+    def test_world_size_one(self, sequence_parallel_batch):
+        """Test that function returns original batch when world size is 1."""
+        result = apply_sequence_parallelism(
+            batch=sequence_parallel_batch,
+            local_rank=0,
+            local_world_size=1,
+            ring_attn_func=RingAttnFunc.BATCH_RING,
+        )
+
+        # Should return the original batch unchanged
+        assert result == sequence_parallel_batch
+
+    def test_batch_ring_rank0(self, sequence_parallel_batch):
+        """Test BATCH_RING sharding for rank 0 in a 2-process group."""
+        batch = sequence_parallel_batch
+        seq_len = batch["input_ids"].size(1)
+
+        result = apply_sequence_parallelism(
+            batch=batch,
+            local_rank=0,
+            local_world_size=2,
+            ring_attn_func=RingAttnFunc.BATCH_RING,
+        )
+
+        # Check that sequence dimension was sharded correctly
+        assert result["input_ids"].shape[1] == seq_len // 2
+        assert result["attention_mask"].shape[1] == seq_len // 2
+
+        # Verify content: rank 0 should get the first half of the sequence
+        assert torch.equal(result["input_ids"], batch["input_ids"][:, : seq_len // 2])
+        assert torch.equal(
+            result["position_ids"], batch["position_ids"][:, : seq_len // 2]
+        )
+
+    def test_batch_ring_rank1(self, sequence_parallel_batch):
+        """Test BATCH_RING sharding for rank 1 in a 2-process group."""
+        batch = sequence_parallel_batch
+        seq_len = batch["input_ids"].size(1)
+        original_input_ids = batch["input_ids"].clone()
+
+        result = apply_sequence_parallelism(
+            batch=batch,
+            local_rank=1,
+            local_world_size=2,
+            ring_attn_func=RingAttnFunc.BATCH_RING,
+        )
+
+        # Verify content: rank 1 should get the second half of the sequence
+        assert torch.equal(result["input_ids"], original_input_ids[:, seq_len // 2 :])
+
+    def test_batch_zigzag(self, sequence_parallel_batch):
+        """Test BATCH_ZIGZAG sharding pattern."""
+        batch = sequence_parallel_batch
+        original_input_ids = batch["input_ids"].clone()
+        seq_len = batch["input_ids"].size(1)
+
+        # Test rank 0
+        result_rank0 = apply_sequence_parallelism(
+            batch={k: v.clone() for k, v in batch.items()},
+            local_rank=0,
+            local_world_size=2,
+            ring_attn_func=RingAttnFunc.BATCH_ZIGZAG,
+        )
+
+        # Test rank 1
+        result_rank1 = apply_sequence_parallelism(
+            batch={k: v.clone() for k, v in batch.items()},
+            local_rank=1,
+            local_world_size=2,
+            ring_attn_func=RingAttnFunc.BATCH_ZIGZAG,
+        )
+
+        # Checks for both ranks
+        assert result_rank0["input_ids"].shape[1] == seq_len // 2
+        assert result_rank1["input_ids"].shape[1] == seq_len // 2
+
+        # For a 2-rank system with 8 tokens, check specific zigzag pattern
+        # Rank 0 should get chunks [0, 1] and [6, 7]
+        # Rank 1 should get chunks [2, 3] and [4, 5]
+        if seq_len == 8:
+            # Create expected tensors for comparison
+            rank0_expected = torch.cat(
+                [original_input_ids[:, :2], original_input_ids[:, 6:8]], dim=1
+            )
+
+            rank1_expected = torch.cat(
+                [original_input_ids[:, 2:4], original_input_ids[:, 4:6]], dim=1
+            )
+
+            assert torch.equal(result_rank0["input_ids"], rank0_expected)
+            assert torch.equal(result_rank1["input_ids"], rank1_expected)
+
+    def test_partial_application(self, sequence_parallel_batch):
+        """Test that we can create a partially applied version of the function."""
+        batch = sequence_parallel_batch
+        original_input_ids = batch["input_ids"].clone()
+
+        # Create a partially applied function
+        rank0_ring_parallel = functools.partial(
+            apply_sequence_parallelism,
+            local_rank=0,
+            local_world_size=2,
+            ring_attn_func=RingAttnFunc.BATCH_RING,
+        )
+
+        # Use the partially applied function
+        result = rank0_ring_parallel(batch=batch)
+
+        # Verify it works as expected
+        assert result["input_ids"].shape[1] == original_input_ids.shape[1] // 2
+        assert torch.equal(
+            result["input_ids"],
+            original_input_ids[:, : original_input_ids.shape[1] // 2],
+        )
+
+    def test_missing_position_ids(self, sequence_parallel_batch):
+        """Test handling of batch without position_ids."""
+        # Create a batch without position_ids
+        batch = {
+            k: v for k, v in sequence_parallel_batch.items() if k != "position_ids"
+        }
+        original_input_ids = batch["input_ids"].clone()
+
+        # This should run without error even though position_ids is missing
+        result = apply_sequence_parallelism(
+            batch=batch,
+            local_rank=0,
+            local_world_size=2,
+            ring_attn_func=RingAttnFunc.BATCH_RING,
+        )
+
+        # Verification should pass
+        assert "position_ids" not in result
+        assert result["input_ids"].shape[1] == original_input_ids.shape[1] // 2
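The zigzag assertions above imply a layout where the sequence is cut into 2 * world_size chunks and rank r keeps chunks r and (2 * world_size - 1 - r). A small illustrative check (not from the diff) of that arithmetic for the 8-token batch used in the tests:

def zigzag_chunks(seq, rank, world_size):
    # split into 2 * world_size equal chunks, then pair first/last, second/second-last, ...
    n = 2 * world_size
    size = len(seq) // n
    chunks = [seq[i * size : (i + 1) * size] for i in range(n)]
    return chunks[rank] + chunks[n - 1 - rank]


tokens = list(range(8))
assert zigzag_chunks(tokens, 0, 2) == [0, 1, 6, 7]
assert zigzag_chunks(tokens, 1, 2) == [2, 3, 4, 5]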
@@ -109,6 +109,24 @@ def require_vllm(test_case):
     )(test_case)
 
 
+def require_llmcompressor(test_case):
+    """
+    Decorator marking a test that requires a llmcompressor to be installed
+    """
+
+    def is_llmcompressor_installed():
+        try:
+            import llmcompressor  # pylint: disable=unused-import  # noqa: F401
+
+            return True
+        except ImportError:
+            return False
+
+    return unittest.skipUnless(
+        is_llmcompressor_installed(), "test requires a llmcompressor to be installed"
+    )(test_case)
+
+
 def is_hopper():
     compute_capability = torch.cuda.get_device_capability()
     return compute_capability == (9, 0)
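Usage sketch for the new decorator above, mirroring how the llmcompressor test file applies it; the test body here is hypothetical and only illustrates the skip behaviour:

from tests.e2e.utils import require_llmcompressor


@require_llmcompressor
def test_llmcompressor_available():
    import llmcompressor  # runs only when llmcompressor is importable

    assert llmcompressor is not None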