Compare commits

47 commits: offload-ac...v0.9.1
| SHA1 |
|---|
| 8cda9e93c1 |
| 17d715c2b3 |
| f943306263 |
| 3c8b9b33d6 |
| 8b0c2a71ad |
| 493910559a |
| c54534dbfa |
| cae5cebb59 |
| fcbd7477d0 |
| 038db85a40 |
| 680dcc5a4d |
| fed5ca8254 |
| 7a2d017c88 |
| 8c0303aa5e |
| 5d61169f7c |
| e1586f7919 |
| e4bf3ffb17 |
| 30150fe1e1 |
| 7f7d7ade2e |
| 776cf70fe4 |
| 8730951aba |
| e72c11ad55 |
| 1a7978b960 |
| 60b0d14f1d |
| a7a40378f5 |
| b50d35bec9 |
| bc6dfa6899 |
| 9d6e8af622 |
| 17b441248c |
| d49a4268b8 |
| 1d6e931115 |
| ff106ace44 |
| 24907533d1 |
| 0e9d816d2e |
| 72f142186a |
| 87726322bf |
| ae8ae7534c |
| ee00142cb5 |
| 097e7e3b5b |
| c714958181 |
| 4402c293dc |
| 0d71f787a3 |
| c337ca0872 |
| f04f7cf5ad |
| c64a951bc9 |
| fc88cc56cb |
| e85cbb8645 |
.github/workflows/tests.yml (vendored, 12 changes)
@@ -329,18 +329,6 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.6.0
-            num_gpus: 1
-            axolotl_extras: llmcompressor
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            num_gpus: 1
-            axolotl_extras:
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
@@ -49,8 +49,7 @@ sections = [
     ("Knowledge Distillation (KD)", "kd"),
     ("Liger Kernels", "liger"),
     ("Language Model Evaluation Harness (LM Eval)", "lm_eval"),
-    ("Spectrum", "spectrum"),
-    ("LLMCompressor", "llm_compressor")
+    ("Spectrum", "spectrum")
 ]

 for section_name, folder_name in sections:
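The `sections` list above pairs a display title with a docs folder name, and the loop below it renders one docs entry per tuple. A minimal sketch of that pattern; the output format and paths here are assumptions for illustration, not the repo's actual generator:

```python
# Sketch only: iterate (title, folder) tuples to emit one docs index entry each.
sections = [
    ("Spectrum", "spectrum"),
    ("LLMCompressor", "llm_compressor"),
]

for section_name, folder_name in sections:
    print(f"- [{section_name}](docs/{folder_name}/index.md)")
```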
@@ -1,77 +0,0 @@
-base_model: neuralmagic/Sparse-Llama-3.1-8B-2of4
-
-plugins:
-  - axolotl.integrations.llm_compressor.LLMCompressorPlugin
-
-load_in_8bit: false
-load_in_4bit: false
-strict: false
-
-datasets:
-  - path: tatsu-lab/alpaca
-    type: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.05
-output_dir: ./outputs/out
-
-sequence_len: 4096
-sample_packing: true
-pad_to_sequence_len: true
-eval_sample_packing: false
-
-wandb_project:
-wandb_entity:
-wandb_watch:
-wandb_name:
-wandb_log_model:
-
-gradient_accumulation_steps: 8
-micro_batch_size: 1
-num_epochs: 1
-optimizer: paged_adamw_8bit
-lr_scheduler: cosine
-learning_rate: 2e-5
-
-train_on_inputs: false
-group_by_length: false
-bf16: auto
-fp16:
-tf32: false
-
-gradient_checkpointing: true
-gradient_checkpointing_kwargs:
-  use_reentrant: false
-early_stopping_patience:
-resume_from_checkpoint:
-logging_steps: 1
-xformers_attention:
-flash_attention: true
-
-warmup_steps: 100
-evals_per_epoch: 2
-eval_table_size:
-saves_per_epoch: 1
-debug:
-deepspeed:
-weight_decay: 0.0
-fsdp:
-fsdp_config:
-special_tokens:
-  pad_token: <|end_of_text|>
-
-llmcompressor:
-  recipe:
-    finetuning_stage:
-      finetuning_modifiers:
-        ConstantPruningModifier:
-          targets: [
-            're:.*q_proj.weight',
-            're:.*k_proj.weight',
-            're:.*v_proj.weight',
-            're:.*o_proj.weight',
-            're:.*gate_proj.weight',
-            're:.*up_proj.weight',
-            're:.*down_proj.weight',
-          ]
-          start: 0
-  save_compressed: true
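For orientation, the deleted example config above trains with gradient accumulation; the effective batch per optimizer step follows from three of its fields. A quick sketch, assuming a single GPU (with `sample_packing: true`, the token figure is an upper bound per packed sequence):

```python
# Effective batch per optimizer step for the removed example config
gradient_accumulation_steps = 8
micro_batch_size = 1
sequence_len = 4096

sequences_per_step = gradient_accumulation_steps * micro_batch_size  # 8
max_tokens_per_step = sequences_per_step * sequence_len              # 32768
print(sequences_per_step, max_tokens_per_step)
```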
setup.py (3 changes)
@@ -150,9 +150,6 @@ extras_require = {
     "vllm": [
         "vllm==0.7.2",
     ],
-    "llmcompressor": [
-        "llmcompressor==0.5.1",
-    ],
 }

 install_requires, dependency_links, extras_require_build = parse_requirements(
@@ -4,4 +4,4 @@ import pkgutil
 
 __path__ = pkgutil.extend_path(__path__, __name__)  # Make this a namespace package
 
-__version__ = "0.10.0.dev0"
+__version__ = "0.9.1"
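The hunk above is the version bump between the two sides of this compare; either side can be confirmed at runtime:

```python
# Prints "0.9.1" on the v0.9.1 side and "0.10.0.dev0" on the offload-ac side
import axolotl

print(axolotl.__version__)
```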
@@ -1,108 +0,0 @@
-# LLMCompressor Integration
-
-Fine-tune sparsified models in Axolotl using Neural Magic's [LLMCompressor](https://github.com/vllm-project/llm-compressor).
-
-This integration enables fine-tuning of models sparsified using LLMCompressor within the Axolotl training framework. By combining LLMCompressor's model compression capabilities with Axolotl's distributed training pipelines, users can efficiently fine-tune sparse models at scale.
-
-It uses Axolotl's plugin system to hook into the fine-tuning flows while maintaining sparsity throughout training.
-
----
-
-## Requirements
-
-- Axolotl with `llmcompressor` extras:
-
-  ```bash
-  pip install "axolotl[llmcompressor]"
-  ```
-
-- Requires `llmcompressor >= 0.5.1`
-
-This will install all necessary dependencies to fine-tune sparsified models using the integration.
-
----
-
-## Usage
-
-To enable sparse fine-tuning with this integration, include the plugin in your Axolotl config:
-
-```yaml
-plugins:
-  - axolotl.integrations.llm_compressor.LLMCompressorPlugin
-
-llmcompressor:
-  recipe:
-    finetuning_stage:
-      finetuning_modifiers:
-        ConstantPruningModifier:
-          targets: [
-            're:.*q_proj.weight',
-            're:.*k_proj.weight',
-            're:.*v_proj.weight',
-            're:.*o_proj.weight',
-            're:.*gate_proj.weight',
-            're:.*up_proj.weight',
-            're:.*down_proj.weight',
-          ]
-          start: 0
-  save_compressed: true
-# ... (other training arguments)
-```
-
-This plugin **does not apply pruning or sparsification itself** — it is intended for **fine-tuning models that have already been sparsified**.
-
-Pre-sparsified checkpoints can be:
-- Generated using [LLMCompressor](https://github.com/vllm-project/llm-compressor)
-- Downloaded from [Neural Magic's Hugging Face page](https://huggingface.co/neuralmagic)
-- Any custom LLM with compatible sparsity patterns that you've created yourself
-
-To learn more about writing and customizing LLMCompressor recipes, refer to the official documentation:
-[https://github.com/vllm-project/llm-compressor/blob/main/README.md](https://github.com/vllm-project/llm-compressor/blob/main/README.md)
-
-### Storage Optimization with save_compressed
-
-Setting `save_compressed: true` in your configuration enables saving models in a compressed format, which:
-- Reduces disk space usage by approximately 40%
-- Maintains compatibility with vLLM for accelerated inference
-- Maintains compatibility with llmcompressor for further optimization (example: quantization)
-
-This option is highly recommended when working with sparse models to maximize the benefits of model compression.
-
-### Example Config
-
-See [`examples/llama-3/sparse-finetuning.yaml`](examples/llama-3/sparse-finetuning.yaml) for a complete example.
-
----
-
-## Inference with vLLM
-
-After fine-tuning your sparse model, you can leverage vLLM for efficient inference.
-You can also use LLMCompressor to apply additional quantization to your fine-tuned
-sparse model before inference for even greater performance benefits:
-
-```python
-from vllm import LLM, SamplingParams
-
-prompts = [
-    "Hello, my name is",
-    "The president of the United States is",
-    "The capital of France is",
-    "The future of AI is",
-]
-sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
-llm = LLM("path/to/your/sparse/model")
-outputs = llm.generate(prompts, sampling_params)
-
-for output in outputs:
-    prompt = output.prompt
-    generated_text = output.outputs[0].text
-    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
-```
-
-For more details on vLLM's capabilities and advanced configuration options, see the [official vLLM documentation](https://docs.vllm.ai/).
-
-## Learn More
-
-For details on available sparsity and quantization schemes, fine-tuning recipes, and usage examples, visit the official LLMCompressor repository:
-
-[https://github.com/vllm-project/llm-compressor](https://github.com/vllm-project/llm-compressor)
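The removed README mentions applying additional quantization with LLMCompressor before vLLM inference but gives no example. A hedged sketch follows; the entry point and modifier names are taken from llm-compressor's own documentation, so verify them against your installed version:

```python
# Sketch: one-shot W4A16 quantization of a fine-tuned sparse model.
# API names follow llm-compressor's README; dataset and paths are placeholders.
from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import GPTQModifier

oneshot(
    model="path/to/your/sparse/model",
    dataset="open_platypus",
    recipe=[GPTQModifier(targets="Linear", scheme="W4A16", ignore=["lm_head"])],
    output_dir="path/to/your/sparse-quantized/model",
    max_seq_length=2048,
    num_calibration_samples=512,
)
```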
@@ -1,5 +0,0 @@
-"""Integration entry point for the LLMCompressor plugin."""
-
-from .plugin import LLMCompressorPlugin
-
-__all__ = ["LLMCompressorPlugin"]
@@ -1,40 +0,0 @@
-"""
-LLMCompressor and Sparse Finetuning config models.
-"""
-
-from typing import Any
-
-from pydantic import BaseModel, Field
-from typing_extensions import Annotated
-
-
-class CompressionArgs(BaseModel):
-    """Sparse Finetuning config for LLMCompressor."""
-
-    # Typing for recipe is set to Any due to:
-    # https://github.com/vllm-project/llm-compressor/issues/1319
-    recipe: Annotated[
-        Any,
-        Field(
-            description="The recipe containing the compression algorithms and hyperparameters to apply."
-        ),
-    ]
-
-    save_compressed: Annotated[
-        bool,
-        Field(
-            default=False,
-            description="Whether to save the compressed model after training.",
-        ),
-    ]
-
-
-class LLMCompressorArgs(BaseModel):
-    """LLMCompressor configuration BaseModel."""
-
-    llmcompressor: Annotated[
-        CompressionArgs,
-        Field(
-            description="Arguments enabling compression pathways through the LLM Compressor plugins"
-        ),
-    ]
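The removed models validate the `llmcompressor` block of an Axolotl config. A small usage sketch; the import assumes the deleted module on the source side of this compare:

```python
# Sketch: validating a plugin config dict against the removed pydantic models
from axolotl.integrations.llm_compressor.args import LLMCompressorArgs

args = LLMCompressorArgs(
    llmcompressor={
        "recipe": {"finetuning_stage": {"finetuning_modifiers": {}}},
        "save_compressed": True,
    }
)
print(args.llmcompressor.save_compressed)  # True
```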
@@ -1,171 +0,0 @@
-"""
-Sparse Finetuning plugin for Axolotl — enables handling of sparse neural networks
-by maintaining masks for zero weights during training.
-"""
-
-import logging
-from functools import wraps
-from typing import Any, Callable, Concatenate, ParamSpec, TypeVar
-
-from llmcompressor import active_session, create_session
-from llmcompressor.core import callbacks as session_callbacks
-from llmcompressor.recipe import Recipe
-from torch.nn import Module
-from transformers.trainer import Trainer
-from transformers.trainer_callback import TrainerCallback, TrainerControl, TrainerState
-from transformers.training_args import TrainingArguments
-
-from axolotl.integrations.base import BasePlugin
-
-P = ParamSpec("P")  # Params for generic function signatures
-R = TypeVar("R")  # Return type for generic function signatures
-
-LOG = logging.getLogger("axolotl.integrations.llm_compressor")
-
-
-class LLMCompressorCallbackHandler(TrainerCallback):
-    """
-    Trainer callback for Sparse Finetuning.
-    Maintains sparsity patterns during training by applying masks after optimization steps,
-    ensuring zero-weight updates are canceled out.
-    """
-
-    def __init__(self, trainer: Trainer, recipe: Any):
-        """
-        Initialize the Sparse Finetuning callback handler.
-
-        Args:
-            trainer (Trainer): Huggingface Trainer instance.
-            recipe (Recipe | dict): Sparse finetuning recipe to apply.
-        """
-        super().__init__()
-        self.trainer = trainer
-        self.recipe = (
-            Recipe.model_validate(recipe) if not isinstance(recipe, Recipe) else recipe
-        )
-        self.original_compute_loss = trainer.compute_loss
-        self.trainer.compute_loss = compute_loss_wrapper(self.trainer.compute_loss)
-        create_session()
-
-    def on_train_begin(
-        self,
-        args: TrainingArguments,
-        state: TrainerState,
-        control: TrainerControl,
-        **kwargs,
-    ) -> None:
-        """
-        Called at the beginning of training. Initializes the compression session.
-
-        Args:
-            args (TrainingArguments): Training arguments.
-            state (TrainerState): Trainer state.
-            control (TrainerControl): Trainer control.
-        """
-        super().on_train_begin(args, state, control, **kwargs)
-        self.trainer.accelerator.wait_for_everyone()
-        active_session().initialize(
-            model=self.trainer.model,
-            optimizer=self.trainer.optimizer,
-            start=state.epoch,
-            recipe=self.recipe,
-        )
-        self.trainer.accelerator.wait_for_everyone()
-
-    def on_step_begin(
-        self,
-        args: TrainingArguments,
-        state: TrainerState,
-        control: TrainerControl,
-        **kwargs,
-    ) -> None:
-        """
-        Called at the beginning of a training step. Triggers batch_start callback.
-        """
-        super().on_step_begin(args, state, control, **kwargs)
-        session_callbacks.batch_start()
-
-    def on_step_end(
-        self,
-        args: TrainingArguments,
-        state: TrainerState,
-        control: TrainerControl,
-        **kwargs,
-    ) -> None:
-        """
-        Called at the end of a training step. Triggers optimizer and batch_end callbacks.
-        """
-        super().on_step_end(args, state, control, **kwargs)
-        session_callbacks.optim_pre_step()
-        session_callbacks.optim_post_step()
-        session_callbacks.batch_end()
-
-    def on_train_end(
-        self,
-        args: TrainingArguments,
-        state: TrainerState,
-        control: TrainerControl,
-        **kwargs,
-    ) -> None:
-        """
-        Called at the end of training. Finalizes the compression session.
-        """
-        super().on_train_end(args, state, control, **kwargs)
-        active_session().finalize()
-        self.trainer.compute_loss_func = self.original_compute_loss
-
-
-class LLMCompressorPlugin(BasePlugin):
-    """
-    Sparse Finetuning plugin for Axolotl integration.
-    """
-
-    def get_input_args(self) -> str:
-        """
-        Returns the path to the plugin's argument definition.
-
-        Returns:
-            str: Dotted path to the LLMCompressorArgs class.
-        """
-        return "axolotl.integrations.llm_compressor.args.LLMCompressorArgs"
-
-    def add_callbacks_post_trainer(self, cfg: Any, trainer: Trainer) -> list:
-        """
-        Adds Sparse Finetuning callback to the Trainer instance.
-
-        Args:
-            cfg (Any): Configuration object containing the sparse recipe.
-            trainer (Trainer): Huggingface Trainer instance.
-
-        Returns:
-            list: List containing the configured callback instances.
-        """
-        LOG.info("Adding Sparse Finetuning callback to the trainer")
-        callback = LLMCompressorCallbackHandler(
-            trainer=trainer,
-            recipe=cfg.llmcompressor.recipe,
-        )
-        return [callback]
-
-
-def compute_loss_wrapper(
-    compute_loss_func: Callable[Concatenate[Module, P], R],
-) -> Callable[Concatenate[Module, P], R]:
-    """
-    Wraps the loss computation function to trigger the loss_calculated callback.
-
-    Args:
-        compute_loss_func (Callable): Original loss computation function.
-
-    Returns:
-        Callable: Wrapped function that also invokes the loss_calculated callback.
-    """
-
-    @wraps(compute_loss_func)
-    def compute_and_notify(model: Module, *args: P.args, **kwargs: P.kwargs) -> R:
-        loss = compute_loss_func(model, *args, **kwargs)
-        if active_session().lifecycle.initialized_ and model.training:
-            session_callbacks.loss_calculated(loss=loss)
-        return loss
-
-    return compute_and_notify
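The `compute_loss_wrapper` above monkey-patches the trainer's loss function so LLMCompressor's `loss_calculated` callback fires on every step. The same wrap-and-notify pattern in isolation, as a runnable standalone sketch:

```python
# Standalone sketch of the pattern: wrap a function so a callback fires with
# each computed value, while the wrapped function's behavior is unchanged.
from functools import wraps


def notify_wrapper(func, on_value):
    @wraps(func)
    def wrapped(*args, **kwargs):
        value = func(*args, **kwargs)
        on_value(value)  # e.g. session_callbacks.loss_calculated(loss=value)
        return value

    return wrapped


def loss_fn(x):
    return x * x


wrapped_loss = notify_wrapper(loss_fn, on_value=print)
wrapped_loss(3)  # prints 9, returns 9
```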
@@ -1,40 +0,0 @@
-"""Utilities for llmcompressor integration with axolotl."""
-
-from typing import Union
-
-from llmcompressor.transformers.sparsification.compressed_tensors_utils import (
-    modify_save_pretrained,
-)
-from transformers import PreTrainedModel, Trainer
-
-
-def save_compressed_model(
-    model: PreTrainedModel,
-    output_dir: Union[str, bytes],
-    trainer: Trainer,
-    safe_serialization: bool = False,
-    save_compressed: bool = False,
-) -> None:
-    """
-    Synchronize processes, apply compression hooks, and save the model.
-
-    Args:
-        model (PreTrainedModel): The model to be saved.
-        output_dir (str or bytes): Path where the model files will be written.
-        trainer (Trainer): Hugging Face Trainer for process synchronization.
-        safe_serialization (bool): Use safe serialization if True.
-        save_compressed (bool): Write compressed tensors if True.
-    """
-    trainer.accelerator.wait_for_everyone()
-
-    # Only the main process writes the files
-    if not trainer.accelerator.is_main_process:
-        return
-
-    modify_save_pretrained(model)
-    model.save_pretrained(
-        output_dir,
-        safe_serialization=safe_serialization,
-        save_compressed=save_compressed,
-        skip_sparsity_compression_stats=not save_compressed,
-    )
@@ -294,23 +294,8 @@ def save_trained_model(
         trainer.model.save_pretrained(
             cfg.output_dir, safe_serialization=safe_serialization
         )
 
     model.save_pretrained(cfg.output_dir, safe_serialization=safe_serialization)
 
-    if hasattr(cfg, "llmcompressor") and cfg.llmcompressor:
-        # TODO: add integration support so this can be implemented completely within the plugin
-        from axolotl.integrations.llm_compressor.utils import (
-            save_compressed_model,
-        )
-
-        save_compressed_model(
-            model=model,
-            output_dir=cfg.output_dir,
-            trainer=trainer,
-            safe_serialization=safe_serialization,
-            save_compressed=cfg.llmcompressor.save_compressed,
-        )
-
-
 def create_model_card(cfg: DictDefault, trainer: Trainer):
     """
@@ -5,11 +5,8 @@ from functools import partial
 
 from packaging import version
 
-from axolotl.utils.gradient_checkpointing.offload_cpu import (
-    CPU_Offloaded_Gradient_Checkpointer,
-)
-from axolotl.utils.gradient_checkpointing.offload_disk import (
-    DiskOffloadedGradientCheckpointer,
+from axolotl.utils.gradient_checkpointing.unsloth import (
+    Unsloth_Offloaded_Gradient_Checkpointer,
 )
 
 transformers_version = version.parse(importlib.metadata.version("transformers"))
@@ -29,31 +26,12 @@ def hf_grad_checkpoint_offload_wrapper(
     decoder_layer, *args, use_reentrant=None
 ):  # pylint: disable=unused-argument
     if uses_gc_layers(decoder_layer):
-        return CPU_Offloaded_Gradient_Checkpointer.apply(
+        return Unsloth_Offloaded_Gradient_Checkpointer.apply(
             decoder_layer,
             *args,
         )
 
-    return CPU_Offloaded_Gradient_Checkpointer.apply(
-        (
-            decoder_layer.func.__self__
-            if isinstance(decoder_layer, partial)
-            else decoder_layer.__self__
-        ),
-        *args,
-    )
-
-
-def hf_grad_checkpoint_disk_offload_wrapper(
-    decoder_layer, *args, use_reentrant=None
-):  # pylint: disable=unused-argument
-    if uses_gc_layers(decoder_layer):
-        return DiskOffloadedGradientCheckpointer.apply(
-            decoder_layer,
-            *args,
-        )
-
-    return DiskOffloadedGradientCheckpointer.apply(
+    return Unsloth_Offloaded_Gradient_Checkpointer.apply(
         (
             decoder_layer.func.__self__
             if isinstance(decoder_layer, partial)
@@ -1,93 +0,0 @@
-"""Disk offloaded checkpointing"""
-
-import os
-import tempfile
-import uuid
-
-import torch
-
-torch_cuda_amp_custom_fwd = torch.amp.custom_fwd(device_type="cuda")
-torch_cuda_amp_custom_bwd = torch.amp.custom_bwd(device_type="cuda")
-
-
-class DiskOffloadedGradientCheckpointer(torch.autograd.Function):
-    """
-    Saves both VRAM and RAM by offloading activations to disk.
-    Greater hit to performance than RAM offloading, but useful for extremely memory-constrained environments.
-    """
-
-    # Create a temporary directory for storing tensors
-    _temp_dir = tempfile.mkdtemp(prefix="disk_checkpoint_")
-
-    @staticmethod
-    def _get_temp_file_path():
-        """Generate a unique file path for tensor storage"""
-        return os.path.join(
-            DiskOffloadedGradientCheckpointer._temp_dir, f"{uuid.uuid4()}.pt"
-        )
-
-    @staticmethod
-    @torch_cuda_amp_custom_fwd
-    def forward(ctx, forward_function, hidden_states, *args):
-        # Generate a unique file path for this tensor
-        file_path = DiskOffloadedGradientCheckpointer._get_temp_file_path()
-
-        # Save tensor to disk in a non-blocking way (detached from compute)
-        # First move to CPU, then save
-        cpu_hidden_states = hidden_states.detach().cpu()
-        torch.save(cpu_hidden_states, file_path)
-
-        # Free CPU memory
-        del cpu_hidden_states
-
-        # Run forward pass
-        with torch.no_grad():
-            output = forward_function(hidden_states, *args)
-
-        # Store the path instead of the tensor
-        ctx.save_for_backward(torch.tensor([0]))  # Dummy tensor
-        ctx.file_path = file_path
-        ctx.forward_function = forward_function
-        ctx.args = args
-        return output
-
-    @staticmethod
-    @torch_cuda_amp_custom_bwd
-    def backward(ctx, dY):  # pylint: disable=invalid-name
-        # Load the hidden states from disk
-        hidden_states = torch.load(ctx.file_path, weights_only=True)
-
-        # Move to CUDA and prepare for gradient computation
-        hidden_states = hidden_states.to("cuda", non_blocking=True).detach()
-        hidden_states.requires_grad = True
-
-        # Clean up the temporary file
-        try:
-            os.remove(ctx.file_path)
-        except FileNotFoundError:
-            pass  # Ignore errors in file deletion
-
-        # Compute gradients
-        with torch.enable_grad():
-            output = ctx.forward_function(hidden_states, *ctx.args)
-            # pylint: disable=duplicate-code
-            torch.autograd.backward(output, dY)
-
-        return (
-            None,
-            hidden_states.grad,
-        ) + (
-            None,
-        ) * len(ctx.args)
-
-    @staticmethod
-    def cleanup():
-        """Clean up the temporary directory when done"""
-        import shutil
-
-        try:
-            shutil.rmtree(
-                DiskOffloadedGradientCheckpointer._temp_dir
-            )  # pylint: disable=protected-access
-        except FileNotFoundError:
-            pass
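The removed class follows the standard `torch.autograd.Function` protocol, so direct use would look like the sketch below. This assumes a CUDA device (required by the `custom_fwd`/`custom_bwd` decorators and the hard-coded `.to("cuda")`) and uses an illustrative layer in place of a real decoder block:

```python
# Sketch: exercising a disk-offloaded checkpointer of this shape (CUDA assumed)
import torch


def layer(hidden_states):
    return torch.relu(hidden_states) * 2.0


x = torch.randn(4, 16, device="cuda", requires_grad=True)
y = DiskOffloadedGradientCheckpointer.apply(layer, x)  # activations spill to disk
y.sum().backward()                                     # reloaded from disk here
print(x.grad.shape)                                    # torch.Size([4, 16])
DiskOffloadedGradientCheckpointer.cleanup()            # remove the temp directory
```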
@@ -1,4 +1,4 @@
-"""CPU offloaded checkpointing"""
+"""Unsloth checkpointing"""
 
 # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
 #
@@ -26,7 +26,7 @@ else:
     torch_cuda_amp_custom_bwd = torch.amp.custom_bwd(device_type="cuda")
 
 
-class CPU_Offloaded_Gradient_Checkpointer(  # pylint: disable=invalid-name
+class Unsloth_Offloaded_Gradient_Checkpointer(  # pylint: disable=invalid-name
     torch.autograd.Function
 ):
     """
@@ -70,10 +70,7 @@ from axolotl.utils.distributed import (
     is_local_main_process,
     is_main_process,
 )
-from axolotl.utils.gradient_checkpointing import (
-    hf_grad_checkpoint_disk_offload_wrapper,
-    hf_grad_checkpoint_offload_wrapper,
-)
+from axolotl.utils.gradient_checkpointing import hf_grad_checkpoint_offload_wrapper
 from axolotl.utils.lora_embeddings import get_linear_embedding_layers
 from axolotl.utils.model_shard_quant import load_sharded_model, load_sharded_model_quant
 
@@ -144,22 +141,6 @@ def check_model_config(cfg: DictDefault, model_config: PretrainedConfig):
         hasattr(model_config, "quantization_config")
         and model_config.quantization_config
     )
-
-    # Detect compressed-tensors config
-    is_compressed_tensors_config = (
-        quant_config_exists
-        and model_config.quantization_config.get("quant_method") == "compressed-tensors"
-    )
-
-    if is_compressed_tensors_config:
-        if model_config.quantization_config.get("config_groups"):
-            LOG.warning(
-                "Found `config_groups` in a compressed-tensors config. "
-                "QAT integration with llmcompressor is not tested."
-            )
-        # Skip further quant checks for compressed-tensors
-        return
-
     quant_config_method_is_gptq = (
         quant_config_exists
         and "quant_method" in model_config.quantization_config
@@ -622,10 +603,6 @@ class ModelLoader:
 
         if self.cfg.gradient_checkpointing in ["unsloth", "offload"]:
            transformers.modeling_utils.checkpoint = hf_grad_checkpoint_offload_wrapper
-        if self.cfg.gradient_checkpointing == "offload_disk":
-            transformers.modeling_utils.checkpoint = (
-                hf_grad_checkpoint_disk_offload_wrapper
-            )
 
         if self.cfg.flash_attention:
             self.patch_attention()
@@ -78,11 +78,15 @@ def pack_group(
     Returns:
         List of bins, where each bin contains indices of sequences assigned to it
     """
+    # Get sorting indices and sort lengths in descending order
+    indices = np.argsort(sequence_lengths)[::-1]
+    sorted_lengths = sequence_lengths[indices]
+
     bins_remaining_space: list = []  # Tracks remaining capacity in each bin
     bins_assigned_sequences: list = []  # Tracks sequence indices assigned to each bin
 
-    for seq_id, size in enumerate(sequence_lengths):
-        global_idx = seq_id + group_offset
+    for seq_id, size in enumerate(sorted_lengths):
+        global_idx = indices[seq_id] + group_offset
 
         # Try to place sequence in existing bins
         add_new_bin = True
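The change above switches `pack_group` to first-fit-decreasing: lengths are sorted in descending order before first-fit placement, and `indices` maps each position back to the original sequence id. The same idea in a standalone worked example (bin capacity and lengths are illustrative):

```python
# First-fit-decreasing sketch: sort lengths descending, place each sequence
# into the first bin with room, and track original indices like pack_group.
import numpy as np

sequence_lengths = np.array([700, 300, 900, 200])
capacity = 1024

indices = np.argsort(sequence_lengths)[::-1]  # [2, 0, 1, 3]
bins_remaining_space: list = []
bins_assigned_sequences: list = []

for seq_id, size in enumerate(sequence_lengths[indices]):
    global_idx = int(indices[seq_id])
    for bin_id, space in enumerate(bins_remaining_space):
        if size <= space:
            bins_remaining_space[bin_id] -= size
            bins_assigned_sequences[bin_id].append(global_idx)
            break
    else:
        bins_remaining_space.append(capacity - int(size))
        bins_assigned_sequences.append([global_idx])

print(bins_assigned_sequences)  # [[2], [0, 1], [3]]
```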
@@ -178,9 +178,9 @@ class AxolotlInputConfig(
 
     # torch_dtype: torch.dtype | None
 
-    gradient_checkpointing: (
-        Literal["unsloth", "offload", "offload_disk"] | bool | None
-    ) = Field(default=False)
+    gradient_checkpointing: Literal["unsloth", "offload"] | bool | None = Field(
+        default=False
+    )
     gradient_checkpointing_kwargs: dict[str, Any] | None = None
 
     unfrozen_parameters: list[str] | None = None
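On the left side of this hunk the schema additionally accepts `"offload_disk"`, and per the `ModelLoader` hunk earlier each value selects a different checkpointing wrapper. A config sketch in the style of this repo's e2e tests (the field values shown are the ones the schema accepts; everything else about a real config is omitted):

```python
# Sketch: the gradient_checkpointing values accepted on the source side
from axolotl.utils.dict import DictDefault

cfg = DictDefault(
    {
        "gradient_checkpointing": "offload_disk",  # or "unsloth", "offload", True, False
    }
)
print(cfg.gradient_checkpointing)
```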
@@ -90,7 +90,7 @@ class TestKnowledgeDistillation:
         train(cfg=cfg, dataset_meta=dataset_meta)
         assert (Path(temp_dir) / "model.safetensors").exists()
         check_tensorboard(
-            temp_dir + "/runs", "train/loss", 1.2, "Train Loss (%s) is too high"
+            temp_dir + "/runs", "train/loss", 1.0, "Train Loss is too high"
         )
 
     @pytest.mark.parametrize(

@@ -121,5 +121,5 @@ class TestKnowledgeDistillation:
         train(cfg=cfg, dataset_meta=dataset_meta)
         assert (Path(temp_dir) / "adapter_model.safetensors").exists()
         check_tensorboard(
-            temp_dir + "/runs", "train/loss", 1.2, "Train Loss (%s) is too high"
+            temp_dir + "/runs", "train/loss", 1.0, "Train Loss is too high"
         )
@@ -1,111 +0,0 @@
-"""
-E2E smoke tests for LLMCompressorPlugin integration
-"""
-
-from pathlib import Path
-
-import pytest
-
-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
-from axolotl.train import train
-from axolotl.utils.config import normalize_config, prepare_plugins, validate_config
-from axolotl.utils.dict import DictDefault
-
-from tests.e2e.utils import (
-    check_model_output_exists,
-    require_llmcompressor,
-    require_torch_2_4_1,
-)
-
-MODELS = [
-    "nm-testing/llama2.c-stories42M-pruned2.4-compressed",
-    "nm-testing/llama2.c-stories42M-gsm8k-sparse-only-compressed",
-]
-
-
-@pytest.mark.parametrize(
-    "base_model", MODELS, ids=["no-checkpoint-recipe", "with-checkpoint-recipe"]
-)
-@pytest.mark.parametrize(
-    "save_compressed", [True, False], ids=["save_compressed", "save_uncompressed"]
-)
-class TestLLMCompressorIntegration:
-    """
-    e2e tests for axolotl.integrations.llm_compressor.LLMCompressorPlugin
-    """
-
-    @require_llmcompressor
-    @require_torch_2_4_1
-    def test_llmcompressor_plugin(
-        self, temp_dir, base_model: str, save_compressed: bool
-    ):
-        from llmcompressor import active_session
-
-        # core cfg
-        cfg = DictDefault(
-            {
-                "base_model": base_model,
-                "plugins": ["axolotl.integrations.llm_compressor.LLMCompressorPlugin"],
-                "sequence_len": 1024,
-                "val_set_size": 0.05,
-                "special_tokens": {"pad_token": "<|endoftext|>"},
-                "datasets": [{"path": "mhenrichsen/alpaca_2k_test", "type": "alpaca"}],
-                "num_epochs": 1,
-                "micro_batch_size": 2,
-                "gradient_accumulation_steps": 2,
-                "output_dir": temp_dir,
-                "learning_rate": 1e-5,
-                "optimizer": "adamw_torch_fused",
-                "lr_scheduler": "cosine",
-                "save_safetensors": True,
-                "bf16": "auto",
-                "max_steps": 5,
-                "llmcompressor": {
-                    "recipe": {
-                        "finetuning_stage": {
-                            "finetuning_modifiers": {
-                                "ConstantPruningModifier": {
-                                    "targets": [
-                                        "re:.*q_proj.weight",
-                                        "re:.*k_proj.weight",
-                                        "re:.*v_proj.weight",
-                                        "re:.*o_proj.weight",
-                                        "re:.*gate_proj.weight",
-                                        "re:.*up_proj.weight",
-                                        "re:.*down_proj.weight",
-                                    ],
-                                    "start": 0,
-                                },
-                            },
-                        },
-                    },
-                    "save_compressed": save_compressed,
-                },
-            }
-        )
-
-        prepare_plugins(cfg)
-        cfg = validate_config(cfg)
-        normalize_config(cfg)
-        cli_args = TrainerCliArgs()
-        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
-
-        try:
-            train(cfg=cfg, dataset_meta=dataset_meta)
-            check_model_output_exists(temp_dir, cfg)
-            _check_llmcompressor_model_outputs(temp_dir, save_compressed)
-        finally:
-            active_session().reset()
-
-
-def _check_llmcompressor_model_outputs(temp_dir, save_compressed):
-    if save_compressed:
-        assert (Path(temp_dir) / "recipe.yaml").exists()
-
-        from compressed_tensors import ModelCompressor
-        from compressed_tensors.config import Sparse24BitMaskConfig
-
-        compressor = ModelCompressor.from_pretrained(temp_dir)
-        assert compressor is not None
-        assert isinstance(compressor.sparsity_config, Sparse24BitMaskConfig)
@@ -105,25 +105,7 @@ def require_vllm(test_case):
         return False
 
     return unittest.skipUnless(
-        is_vllm_installed(), "test requires vllm to be installed"
-    )(test_case)
-
-
-def require_llmcompressor(test_case):
-    """
-    Decorator marking a test that requires llmcompressor to be installed
-    """
-
-    def is_llmcompressor_installed():
-        try:
-            import llmcompressor  # pylint: disable=unused-import # noqa: F401
-
-            return True
-        except ImportError:
-            return False
-
-    return unittest.skipUnless(
-        is_llmcompressor_installed(), "test requires llmcompressor to be installed"
+        is_vllm_installed(), "test requires a vllm to be installed"
     )(test_case)
 
 
@@ -106,4 +106,3 @@ class TestBatchedSamplerPacking:
 
         original_idxs = set(range(len(train_dataset)))
         assert original_idxs == set(batch_idxs)
-        assert len(batch_idxs) == len(set(batch_idxs))