Compare commits
12 Commits
fix/issue-... → diffusion-...

| Author | SHA1 | Date |
|---|---|---|
| | cf8c93e2ee | |
| | 63d2280999 | |
| | b210db2d15 | |
| | 556a69118f | |
| | 8569675b26 | |
| | 077b5a4358 | |
| | 234b7b3126 | |
| | e19be0c2d9 | |
| | 479a454ae3 | |
| | 0a9341acde | |
| | d8b63804bc | |
| | 3156c605d4 | |
examples/llama-3/diffusion-3.2-1b-pretrain.yaml (new file, 57 lines)
@@ -0,0 +1,57 @@
```yaml
base_model: meta-llama/Llama-3.2-1B
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name

pretraining_dataset:
  - path: wikitext
    name: wikitext-103-raw-v1
    type: completion
    field: text

plugins:
  - diffusion.DiffusionPlugin
noise_schedule: cosine
min_mask_ratio: 0.15
max_mask_ratio: 0.85
eps: 5e-4
importance_weighting: true
mask_token_id: 128002
generate_samples: true
generation_interval: 10

output_dir: ./outputs/model-out

sequence_len: 512
sample_packing: true

gradient_accumulation_steps: 8
micro_batch_size: 4
max_steps: 10000

optimizer: adamw_8bit
lr_scheduler: cosine
learning_rate: 3e-4

bf16: auto
tf32: true

gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
sdp_attention: true

warmup_steps: 1000

save_strategy: steps
save_steps: 1000

special_tokens:
  pad_token: "<|end_of_text|>"

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

# save_first_step: true # uncomment this to validate checkpoint saving works with your config
```
examples/llama-3/diffusion-3.2-1b-sft.yaml (new file, 58 lines)
@@ -0,0 +1,58 @@
```yaml
base_model: meta-llama/Llama-3.2-1B
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name

datasets:
  - path: teknium/GPT4-LLM-Cleaned
    type: alpaca
val_set_size: 0.05

plugins:
  - diffusion.DiffusionPlugin
noise_schedule: cosine
min_mask_ratio: 0.1
max_mask_ratio: 0.9
num_diffusion_steps: 128
eps: 1e-3
importance_weighting: true
mask_token_id: 128002

output_dir: ./outputs/model-out

sequence_len: 512
sample_packing: true
eval_sample_packing: true

gradient_accumulation_steps: 4
micro_batch_size: 4
num_epochs: 1

optimizer: adamw_8bit
lr_scheduler: cosine
learning_rate: 1e-5

bf16: auto
tf32: true

gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
sdp_attention: true

warmup_steps: 1000

save_strategy: steps
eval_strategy: steps
save_steps: 500
eval_steps: 500

special_tokens:
  pad_token: "<|end_of_text|>"

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

# save_first_step: true # uncomment this to validate checkpoint saving works with your config
```
```diff
@@ -10,6 +10,7 @@ import transformers
 from transformers import (
     DataCollatorWithFlattening,
     EarlyStoppingCallback,
+    Trainer,
 )
 from trl.trainer.utils import RewardDataCollatorWithPadding

```
```diff
@@ -385,10 +386,11 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
             **data_collator_kwargs,
         )
         sig = inspect.signature(trainer_cls)
-        if "processing_class" in sig.parameters:
+        if "processing_class" in sig.parameters or issubclass(trainer_cls, Trainer):
             trainer_kwargs["processing_class"] = self.tokenizer
         elif "tokenizer" in sig.parameters:
             trainer_kwargs["tokenizer"] = self.tokenizer

         if (
             trainer_cls not in [AxolotlRewardTrainer, AxolotlPRMTrainer]
             and self.cfg.datasets is not None
```
```diff
@@ -82,7 +82,9 @@ class AxolotlTrainer(
         super().__init__(*_args, **kwargs)

         self.train_data_collator = self.data_collator
-        self._stored_metrics = defaultdict(lambda: defaultdict(list))
+        self._stored_metrics = defaultdict(
+            lambda: defaultdict(lambda: {"values": [], "reduction": "mean"})
+        )
         if self.args.orpo_alpha:
             self.loss_fct = torch.nn.CrossEntropyLoss(reduction="none")

```
```diff
@@ -272,6 +274,18 @@ class AxolotlTrainer(
                 num_workers=self.args.dataloader_num_workers,
                 rank=self.args.process_index,
             )
+            if (self.args.accelerator_config is not None
+                and self.args.accelerator_config.split_batches
+                and self.args.accelerator_config.dispatch_batches
+            ):
+                if self.args.sample_packing and self.args.pretraining:
+                    if not self.args.eval_sample_packing and not is_training:
+                        dataloader_params["batch_size"] *= self.accelerator.num_processes
+                    else:
+                        dataloader_params["batch_size"] = self.accelerator.num_processes
+                elif not self.args.sample_packing and self.args.pretraining:
+                    dataloader_params["batch_size"] *= self.accelerator.num_processes
+
         if self.args.sample_packing and (
             (is_training and not self.args.pretraining)
             or (not is_training and self.args.eval_sample_packing is not False)
```
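For context on the branch added above: when the `accelerate` config enables both `split_batches` and `dispatch_batches`, one rank builds a single global batch and `accelerate` slices it across processes, so the dataloader-level `batch_size` has to be pre-scaled. A rough worked example with assumed values (this snippet is illustrative only, not part of the diff):

```python
# Illustrative arithmetic only; names mirror the hunk above, values are assumed.
num_processes = 4      # self.accelerator.num_processes
micro_batch_size = 4   # per-device batch size from the config

# With split_batches + dispatch_batches, the dataloader must yield one batch that
# accelerate can split into num_processes shards of micro_batch_size each.
dataloader_batch_size = micro_batch_size * num_processes  # 16 dispatched, 4 per rank
```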
```diff
@@ -573,9 +587,26 @@ class AxolotlTrainer(
         """
         # logs either has 'loss' or 'eval_loss'
         train_eval = "train" if "loss" in logs else "eval"
-        # Add averaged stored metrics to logs
-        for key, metrics in self._stored_metrics[train_eval].items():
-            logs[key] = torch.tensor(metrics).mean().item()
+        # Add reduced stored metrics to logs
+        for key, metric_data in self._stored_metrics[train_eval].items():
+            values = torch.tensor(metric_data["values"])
+            reduction_type = metric_data["reduction"]
+
+            if reduction_type == "mean":
+                logs[key] = values.mean().item()
+            elif reduction_type == "min":
+                logs[key] = values.min().item()
+            elif reduction_type == "max":
+                logs[key] = values.max().item()
+            elif reduction_type == "sum":
+                logs[key] = values.sum().item()
+            else:
+                raise NotImplementedError(
+                    "Metric reduction must be one of [mean, min, max, sum]"
+                )
+
+            logs[key] = round(logs[key], 4)
+
         if is_main_process():
             # Add memory usage
```
```diff
@@ -592,10 +623,27 @@ class AxolotlTrainer(
         return super().log(logs, start_time)

     def store_metrics(
-        self, metrics: dict[str, float], train_eval: Literal["train", "eval"] = "train"
+        self,
+        metrics: dict[str, float] | dict[str, tuple[int | float, str]],
+        train_eval: Literal["train", "eval"] = "train",
+        reduction: Literal["mean", "min", "max", "sum"] = "mean",
     ) -> None:
+        """
+        Store metrics with specified reduction type.
+
+        Args:
+            metrics: Dictionary of metric names to values, or metric names to (value,
+                reduction_type) tuples.
+            train_eval: Whether this is for training or evaluation.
+        """
         for key, value in metrics.items():
-            self._stored_metrics[train_eval][key].append(value)
+            if isinstance(value, tuple):
+                metric_value, metric_reduction = value
+            else:
+                metric_value, metric_reduction = value, reduction
+
+            self._stored_metrics[train_eval][key]["values"].append(metric_value)
+            self._stored_metrics[train_eval][key]["reduction"] = metric_reduction

     def _save_checkpoint(self, model, trial, **kwargs):
         # make sure the checkpoint dir exists, since trainer is flakey
```
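To illustrate the extended API: plain float values fall back to the `reduction` default, while `(value, reduction)` tuples pick a reduction per metric. A hypothetical sketch (the subclass, method, and metric names are made up for illustration):

```python
from typing import Literal

from axolotl.core.trainers.base import AxolotlTrainer


class MyDiffusionTrainer(AxolotlTrainer):  # hypothetical subclass for illustration
    def _log_step_metrics(self) -> None:
        # Floats use the default "mean" reduction; tuples choose one per metric.
        self.store_metrics(
            {
                "accuracy": 0.91,                   # averaged across logging steps
                "num_masked_tokens": (512, "sum"),  # summed across logging steps
                "max_grad_norm": (1.7, "max"),      # max across logging steps
            },
            train_eval="train",
        )
```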
```diff
@@ -147,7 +147,7 @@ class BasePlugin:
         """

     # pylint: disable=unused-argument
-    def get_trainer_cls(self, cfg: DictDefault) -> Trainer | None:
+    def get_trainer_cls(self, cfg: DictDefault) -> type[Trainer] | None:
         """Returns a custom class for the trainer.

         Args:
```
src/axolotl/integrations/diffusion/README.md (new file, 125 lines)
@@ -0,0 +1,125 @@
````markdown
# Diffusion LM Training Plugin for Axolotl

This plugin enables diffusion language model training using the LLaDA (Large Language
And Diffusion Assistant) approach within the Axolotl framework.

## Overview

LLaDA is a diffusion-based approach to language model training that uses:

- **Random token masking** during training instead of next-token prediction
- **Bidirectional attention** to allow the model to see the full context
- **Importance weighting** based on masking probabilities for stable training

This approach can lead to more robust language models with better understanding of
bidirectional context.

## Installation

The plugin is included with Axolotl. To use it, simply add the plugin configuration to
your training config.

## Quickstart

### Basic Configuration

Add the following to your Axolotl configuration YAML:

```yaml
# Enable diffusion LM training plugin
plugins:
  - axolotl.integrations.diffusion.DiffusionPlugin

# Diffusion-specific configuration
noise_schedule: linear  # or "cosine"
min_mask_ratio: 0.1
max_mask_ratio: 0.9
num_diffusion_steps: 128
eps: 1e-3
importance_weighting: true
mask_token_id: 128002

# Sample generation (optional)
generate_samples: true
generation_interval: 100
num_generation_samples: 3
generation_steps: 128
generation_temperature: 0.0
generation_max_length: 100

# Model configuration
base_model: meta-llama/Llama-3.2-1B
model_type: llama

# Standard Axolotl configuration
datasets:
  - path: your_dataset
    ...

# Other config
sequence_len: 1024
micro_batch_size: 8
gradient_accumulation_steps: 4
learning_rate: 3e-4
```

## Supported Models

Any models that support 4D attention masks should work out of the box. If not, please
create an [issue](https://github.com/axolotl-ai-cloud/axolotl/issues)!

## How It Works

### Random Masking

During training, tokens are randomly masked based on a sampled timestep:

- Sample timestep `t` uniformly from [0, 1]
- Calculate masking probability: `p = (1 - eps) * t + eps`
- Randomly mask tokens with probability `p`
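
A minimal sketch of this schedule in isolation (illustrative only, with an assumed
`eps = 1e-3`; the plugin's actual implementation lives in `trainer.py`):

```python
import torch

def forward_mask(input_ids: torch.Tensor, mask_token_id: int, eps: float = 1e-3):
    """Toy LLaDA-style forward (noising) process; not the plugin's exact code."""
    batch_size, seq_len = input_ids.shape
    t = torch.rand(batch_size, device=input_ids.device)   # timestep per sample
    p = (1 - eps) * t + eps                                # masking probability
    p = p[:, None].expand(batch_size, seq_len)             # broadcast over tokens
    masked = torch.rand(batch_size, seq_len, device=input_ids.device) < p
    noisy = torch.where(masked, mask_token_id, input_ids)  # swap in [MASK] ids
    return noisy, masked, p
```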

### Bidirectional Attention

The plugin uses native 4D attention masks to:

- Enable bidirectional attention without patches
- Allow all tokens to attend to all other tokens
- Maintain proper padding masks
- Work with modern `transformers` models out of the box
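
For unpacked sequences, such a mask can be derived from a standard 2D padding mask
roughly as follows (a simplified sketch, not the plugin's exact code):

```python
import torch

def bidirectional_4d_mask(attention_mask: torch.Tensor) -> torch.Tensor:
    """attention_mask: [batch, seq] with 1 for real tokens, 0 for padding."""
    valid = attention_mask.bool()
    # A position may attend to another iff both are real tokens; no causal triangle.
    pairwise = valid.unsqueeze(2) & valid.unsqueeze(1)  # [batch, seq, seq]
    return pairwise.unsqueeze(1)                        # [batch, 1, seq, seq]
```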

### Diffusion Loss

Loss is computed only on masked tokens with (optional) importance weighting:

```python
loss = sum(cross_entropy(pred, target) / p_mask) / total_tokens
```
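
Expanded slightly, the same computation over masked positions only might look like
this (a hedged sketch with assumed tensor shapes):

```python
import torch
import torch.nn.functional as F

def diffusion_loss(logits, targets, masked, p_mask):
    """logits: [B, L, V]; targets, p_mask: [B, L]; masked: bool [B, L]."""
    token_loss = F.cross_entropy(logits[masked], targets[masked], reduction="none")
    weighted = token_loss / p_mask[masked].clamp_min(1e-6)  # importance weighting
    return weighted.sum() / targets.numel()                 # normalize by total tokens
```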

## Sample Generation

When `generate_samples: true`, the plugin generates samples during training:

```
Sample 1:
Original (45 tokens): The quick brown fox jumps over the lazy dog...
Masked (18/45 tokens, 40.0%): The [MASK] [MASK] fox [MASK] over [MASK] lazy [MASK]...
Generated: The quick brown fox jumps over the lazy dog...
```

Samples are logged to console and wandb (if enabled).

## Metrics and Monitoring

The plugin adds several metrics to track diffusion training:

- `train/loss`: Weighted diffusion loss
- `train/accuracy`: Accuracy on masked tokens
- `train/mask_ratio`: Average fraction of tokens masked
- `train/num_masked_tokens`: Number of tokens masked
- `train/avg_p_mask`: Average masking probability
- `train/ce_loss`: Unweighted cross-entropy loss
- `train/importance_weight_avg`: Average importance weight

## Limitations

- No flash attention support

## References

- [LLaDA Paper](https://arxiv.org/abs/2404.10406)
- [Axolotl Documentation](https://docs.axolotl.ai/)
````
src/axolotl/integrations/diffusion/__init__.py (new file, 6 lines)
@@ -0,0 +1,6 @@
```python
"""Diffusion LM training plugin init."""

from .args import DiffusionArgs
from .plugin import DiffusionPlugin

__all__ = ["DiffusionArgs", "DiffusionPlugin"]
```
src/axolotl/integrations/diffusion/args.py (new file, 70 lines)
@@ -0,0 +1,70 @@
```python
"""Config args for diffusion LM training."""

from typing import Literal

from pydantic import BaseModel, Field


class DiffusionArgs(BaseModel):
    """Arguments for diffusion LM training plugin."""

    # Noise schedule config
    noise_schedule: Literal["linear", "cosine"] = Field(
        default="linear", description="Type of noise schedule for diffusion training"
    )
    min_mask_ratio: float = Field(
        default=0.1,
        ge=0.0,
        le=1.0,
        description="Minimum masking ratio for diffusion noise schedule",
    )
    max_mask_ratio: float = Field(
        default=0.9,
        ge=0.0,
        le=1.0,
        description="Maximum masking ratio for diffusion noise schedule",
    )
    num_diffusion_steps: int = Field(
        default=128, ge=1, description="Number of diffusion timesteps"
    )
    eps: float = Field(
        default=1e-3,
        ge=0.0,
        le=1.0,
        description="Epsilon value for minimum masking probability in forward process",
    )

    # Training config
    importance_weighting: bool = Field(
        default=True,
        description="Apply importance weighting to loss based on masking probability",
    )
    mask_token_id: int = Field(
        default=128002,
        description=(
            "Token ID to use for masking. Default is 128002 "
            "(<|reserved_special_token_0|> for Llama 3.2)"
        ),
    )

    # Sample generation config
    generate_samples: bool = Field(
        default=True, description="Enable sample generation during training"
    )
    generation_interval: int = Field(
        default=100, ge=1, description="Generate samples every N steps"
    )
    num_generation_samples: int = Field(
        default=3, ge=1, description="Number of samples to generate each time"
    )
    generation_steps: int = Field(
        default=128, ge=1, description="Number of diffusion steps for generation"
    )
    generation_temperature: float = Field(
        default=0.0,
        ge=0.0,
        description="Temperature for generation sampling (0.0 = deterministic)",
    )
    generation_max_length: int = Field(
        default=100, ge=1, description="Maximum sequence length for generation"
    )
```
src/axolotl/integrations/diffusion/callbacks.py (new file, 113 lines)
@@ -0,0 +1,113 @@
```python
"""Callbacks for diffusion training."""

import wandb
from transformers.trainer_callback import TrainerCallback, TrainerControl, TrainerState
from transformers.training_args import TrainingArguments

from axolotl.utils.logging import get_logger

from .generation import generate_samples

LOG = get_logger(__name__)


class DiffusionGenerationCallback(TrainerCallback):
    """Callback for generating samples during diffusion training."""

    def __init__(self, trainer):
        self.trainer = trainer

    # pylint: disable=unused-argument
    def on_step_end(
        self,
        args: TrainingArguments,
        state: TrainerState,
        control: TrainerControl,
        **kwargs,
    ):
        """Generate samples at specified intervals."""
        if (
            state.global_step > 0
            and state.global_step % self.trainer.config.generation_interval == 0
        ):
            # Use eval dataloader if available, otherwise use train dataloader
            if (
                hasattr(self.trainer, "eval_dataset")
                and self.trainer.eval_dataset is not None
            ):
                dataloader = self.trainer.callback_handler.eval_dataloader
            else:
                dataloader = self.trainer.callback_handler.train_dataloader

            # Generate samples
            samples = generate_samples(
                model=self.trainer.model,
                tokenizer=self.trainer.tokenizer,
                dataloader=dataloader,
                num_generation_samples=self.trainer.config.num_generation_samples,
                max_length=self.trainer.config.generation_max_length,
                num_diffusion_steps=self.trainer.config.generation_steps,
                temperature=self.trainer.config.generation_temperature,
                mask_token_id=self.trainer.config.mask_token_id,
            )

            # Log samples
            self._log_samples(samples, state.global_step)

    def _log_samples(self, samples: list, step: int):
        """Log generated samples."""
        if not samples:
            return

        LOG.info("=" * 60)
        LOG.info("GENERATED SAMPLES")
        LOG.info("=" * 60)

        for i, sample_data in enumerate(samples, 1):
            original = sample_data["original"]
            masked = sample_data["masked"]
            generated = sample_data["generated"]
            mask_ratio = sample_data["mask_ratio"]
            masked_tokens = sample_data["masked_tokens"]
            total_tokens = sample_data["total_tokens"]

            LOG.info(f"\nSample {i}:")
            LOG.info(f"\tOriginal ({total_tokens} tokens): {original}")
            LOG.info(
                f"\tMasked ({masked_tokens}/{total_tokens} tokens, "
                f"{mask_ratio:.1%}): {masked}"
            )
            LOG.info(f"\tGenerated: {generated}")

        LOG.info("=" * 60)

        if self.trainer.config.use_wandb and self.trainer.state.is_world_process_zero:
            if wandb.run is not None:
                wandb.log(
                    {
                        "generated_samples": wandb.Table(
                            columns=[
                                "step",
                                "original",
                                "masked",
                                "generated",
                                "mask_ratio",
                                "masked_tokens",
                                "total_tokens",
                            ],
                            data=[
                                [
                                    step,
                                    sample["original"],
                                    sample["masked"],
                                    sample["generated"],
                                    f"{sample['mask_ratio']:.1%}",
                                    sample["masked_tokens"],
                                    sample["total_tokens"],
                                ]
                                for sample in samples
                            ],
                        )
                    },
                    step=step,
                )
```
src/axolotl/integrations/diffusion/generation.py (new file, 269 lines)
@@ -0,0 +1,269 @@
```python
"""Sample generation utilities for diffusion training."""

import logging
from typing import Any, List, Optional

import torch

logger = logging.getLogger(__name__)


def generate_samples(
    model: torch.nn.Module,
    tokenizer: Any,
    dataloader: Optional[Any] = None,
    num_generation_samples: int = 3,
    max_length: int = 100,
    num_diffusion_steps: int = 128,
    temperature: float = 0.0,
    mask_token_id: int = 32000,
) -> List[dict]:
    """
    Generate text samples using the diffusion model by randomly masking sequences from
    the given dataset and running the reverse diffusion process.

    Args:
        model: The wrapped or unwrapped model
        tokenizer: Tokenizer for encoding/decoding
        dataloader: Validation dataloader (for sampling sequences)
        num_generation_samples: Number of samples to generate
        max_length: Maximum length of sequences to use
        num_diffusion_steps: Number of diffusion steps for generation
        temperature: Temperature for sampling (0.0 = deterministic)
        mask_token_id: Token ID used for masking

    Returns:
        List of dictionaries with original text, masked text, and generated text
    """
    if dataloader is None:
        logger.warning("No validation dataloader provided, cannot generate samples")
        return []

    # Get the actual model (unwrap if needed)
    unwrapped_model = model.module if hasattr(model, "module") else model
    unwrapped_model.eval()
    generations = []

    # Sample sequences from validation dataset
    sampled_sequences = _sample_sequences_from_dataloader(
        dataloader, num_generation_samples, max_length, unwrapped_model.device
    )
    logger.info(f"Sampled {len(sampled_sequences)} sequences from validation dataset")

    # Generate samples using reverse diffusion process
    with torch.no_grad():
        for original_sequence in sampled_sequences:
            generation_result = _generate(
                unwrapped_model,
                tokenizer,
                original_sequence,
                num_diffusion_steps,
                temperature,
                mask_token_id,
            )
            generations.append(generation_result)

    unwrapped_model.train()
    return generations


def _sample_sequences_from_dataloader(
    dataloader: Any, num_samples: int, max_length: int, device: torch.device
) -> List[torch.Tensor]:
    """Sample sequences from validation dataloader."""
    sampled_sequences = []
    sample_count = 0

    # Add randomness by skipping a random number of batches
    skip_batches = torch.randint(0, 6, (1,)).item()
    batch_count = 0

    for batch in dataloader:
        # Skip some batches for variety
        if batch_count < skip_batches:
            batch_count += 1
            continue

        if sample_count >= num_samples:
            break

        batch_count += 1
        input_ids = batch["input_ids"]
        attention_mask = batch.get("attention_mask")

        # Randomly sample from sequences in this batch
        batch_indices = torch.randperm(input_ids.size(0)).tolist()

        for i in batch_indices:
            if sample_count >= num_samples:
                break

            # Get actual sequence length (non-padded)
            if attention_mask is not None:
                seq_len = attention_mask[i].sum().item()
            else:
                seq_len = input_ids.size(1)

            # Limit sequence length to max_length
            actual_length = min(seq_len, max_length)
            if actual_length < 10:  # Skip very short sequences
                continue

            # Extract the sequence
            sequence = input_ids[i][:actual_length].unsqueeze(0).to(device)
            sampled_sequences.append(sequence)
            sample_count += 1

    return sampled_sequences


def _generate(
    model: torch.nn.Module,
    tokenizer: Any,
    original_sequence: torch.Tensor,
    num_diffusion_steps: int,
    temperature: float,
    mask_token_id: int,
) -> dict:
    """Generate a single sample using reverse diffusion."""
    # Get original text for comparison
    original_text = tokenizer.decode(
        original_sequence[0].cpu(), skip_special_tokens=True
    )

    # Apply custom masking with random ratio (10% to 70%)
    total_tokens = original_sequence.size(1)
    min_ratio, max_ratio = 0.1, 0.7
    target_mask_ratio = torch.rand(1).item() * (max_ratio - min_ratio) + min_ratio
    target_masked_tokens = int(total_tokens * target_mask_ratio)

    # Create random mask indices
    mask_positions = torch.randperm(total_tokens)[:target_masked_tokens]
    masked_indices = torch.zeros(
        1, total_tokens, dtype=torch.bool, device=original_sequence.device
    )
    masked_indices[0, mask_positions] = True

    # Create masked sequence
    masked_sequence = original_sequence.clone()
    masked_sequence[masked_indices] = mask_token_id

    # Calculate actual mask ratio
    masked_tokens = masked_indices.sum().item()
    mask_ratio = masked_tokens / total_tokens

    # Get masked text for comparison
    masked_text = tokenizer.decode(masked_sequence[0].cpu(), skip_special_tokens=False)
    # Clean up mask token representation
    masked_text = _clean_masked_text(masked_text, tokenizer, mask_token_id)

    # Run reverse diffusion process
    sequence = masked_sequence.clone()
    for step in range(num_diffusion_steps):
        sequence = _diffusion_step(
            model, sequence, step, num_diffusion_steps, temperature, mask_token_id
        )

    # Get final generated text
    generated_text = tokenizer.decode(sequence[0].cpu(), skip_special_tokens=True)

    return {
        "original": original_text,
        "masked": masked_text,
        "generated": generated_text,
        "mask_ratio": mask_ratio,
        "masked_tokens": masked_tokens,
        "total_tokens": total_tokens,
        "formatted": (
            f"Original: '{original_text}' → Masked: '{masked_text}' "
            f"({mask_ratio:.1%}) → Generated: '{generated_text}'"
        ),
    }


def _clean_masked_text(masked_text: str, tokenizer: Any, mask_token_id: int) -> str:
    """Clean up masked text for display."""
    mask_token_repr = tokenizer.decode([mask_token_id], skip_special_tokens=False)
    cleaned = masked_text.replace(mask_token_repr, "[MASK]")

    if hasattr(tokenizer, "special_tokens_map"):
        for token_value in tokenizer.special_tokens_map.values():
            if token_value and isinstance(token_value, str):
                cleaned = cleaned.replace(token_value, "")

    cleaned = " ".join(cleaned.split()).strip()

    return cleaned


def _diffusion_step(
    model: torch.nn.Module,
    sequence: torch.Tensor,
    step: int,
    num_diffusion_steps: int,
    temperature: float,
    mask_token_id: int,
) -> torch.Tensor:
    """Perform a single diffusion step with remasking."""
    # Only process if there are masked tokens remaining
    current_mask = sequence == mask_token_id
    if not current_mask.any():
        return sequence

    # Create bidirectional attention mask for diffusion
    batch_size, seq_len = sequence.shape
    attention_mask = torch.ones(
        batch_size, 1, seq_len, seq_len, dtype=torch.bool, device=sequence.device
    )

    # Forward pass
    outputs = model(input_ids=sequence, attention_mask=attention_mask)
    logits = outputs.logits

    # Only sample at currently masked positions
    if current_mask.any():
        masked_logits = logits[current_mask]

        # Apply temperature scaling
        if temperature > 0:
            scaled_logits = masked_logits / temperature
        else:
            scaled_logits = masked_logits

        # Suppress mask token in outputs
        scaled_logits[:, mask_token_id] = -float("inf")

        # Sample predictions
        if temperature > 0:
            # Add Gumbel noise for sampling
            gumbel_noise = -torch.log(
                -torch.log(torch.rand_like(scaled_logits, dtype=torch.float32))
            )
            gumbel_logits = scaled_logits + gumbel_noise
            predicted_tokens = torch.argmax(gumbel_logits, dim=-1)
        else:
            # Deterministic sampling when temperature is 0
            predicted_tokens = torch.argmax(scaled_logits, dim=-1)

        # Calculate probabilities for confidence scoring
        probs = torch.softmax(scaled_logits, dim=-1)
        predicted_token_probs = probs[range(len(predicted_tokens)), predicted_tokens]

        # Determine how many tokens to unmask this step
        remaining_masked = current_mask.sum().item()
        if step == num_diffusion_steps - 1:
            num_to_unmask = remaining_masked
        else:
            unmask_ratio = 1.0 / (num_diffusion_steps - step)
            num_to_unmask = max(1, int(remaining_masked * unmask_ratio))

        # Select highest confidence predictions to unmask
        if num_to_unmask >= remaining_masked:
            sequence[current_mask] = predicted_tokens
        else:
            _, top_indices = predicted_token_probs.topk(num_to_unmask)
            mask_positions = torch.where(current_mask)[1]
            positions_to_unmask = mask_positions[top_indices]
            sequence[0, positions_to_unmask] = predicted_tokens[top_indices]

    return sequence
```
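A quick way to see the confidence-based remasking schedule in `_diffusion_step`: with `T` steps, the `1 / (T - step)` rule unmasks a roughly equal share of the remaining masked tokens at every step. The numbers below are assumed for illustration:

```python
# Assumed values for illustration: 100 masked tokens, 8 diffusion steps.
remaining, T = 100, 8
for step in range(T):
    n = remaining if step == T - 1 else max(1, int(remaining * (1.0 / (T - step))))
    print(f"step {step}: unmask {n} of {remaining}")
    remaining -= n  # ~12-13 tokens per step until everything is revealed
```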
src/axolotl/integrations/diffusion/plugin.py (new file, 41 lines)
@@ -0,0 +1,41 @@
```python
"""Diffusion LM training plugin for Axolotl."""

from peft import PeftModel
from transformers import PreTrainedModel

from axolotl.integrations.base import BasePlugin
from axolotl.utils.dict import DictDefault
from axolotl.utils.logging import get_logger

from .trainer import DiffusionTrainer

LOG = get_logger(__name__)


class DiffusionPlugin(BasePlugin):
    """
    Plugin for diffusion language model training.

    This plugin enables diffusion-based training using the LLaDA approach, which uses
    random masking and bidirectional attention to train language models.
    """

    def __init__(self):
        super().__init__()
        self.cfg = None

    def get_input_args(self) -> str:
        """Returns the pydantic model for LLaDA plugin arguments."""
        return "axolotl.integrations.diffusion.DiffusionArgs"

    def post_model_load(self, cfg: DictDefault, model: PreTrainedModel | PeftModel):
        """Perform actions after model is loaded."""
        self.cfg = cfg

    def get_trainer_cls(self, cfg: DictDefault) -> type[DiffusionTrainer] | None:
        """Return custom trainer class for diffusion training."""
        return DiffusionTrainer

    def post_trainer_create(self, cfg: DictDefault, trainer: DiffusionTrainer):
        """Configure trainer after creation."""
        trainer.set_config(cfg)
```
src/axolotl/integrations/diffusion/trainer.py (new file, 336 lines)
@@ -0,0 +1,336 @@
```python
"""Custom trainer for diffusion LM training."""

from typing import Any, Literal

import torch
import torch.nn.functional as F
from torch import nn
from transformers.masking_utils import find_packed_sequence_indices

from axolotl.core.trainers.base import AxolotlTrainer
from axolotl.integrations.diffusion.utils import create_bidirectional_block_mask
from axolotl.utils.dict import DictDefault
from axolotl.utils.logging import get_logger

from .callbacks import DiffusionGenerationCallback

LOG = get_logger(__name__)


class DiffusionTrainer(AxolotlTrainer):  # pylint: disable=too-many-ancestors
    """Custom trainer for diffusion LM training that overrides loss computation."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.config = None
        self._special_token_ids = None

    def set_config(self, config: DictDefault):
        """Set config for diffusion training."""
        self.config = config
        self._cache_special_token_ids()

        if config.generate_samples:
            generation_callback = DiffusionGenerationCallback(self)
            self.add_callback(generation_callback)

    def compute_loss(
        self,
        model: nn.Module,
        inputs: dict[str, torch.Tensor],
        return_outputs: bool = False,
        num_items_in_batch: torch.Tensor | None = None,
    ) -> torch.Tensor | tuple[torch.Tensor, dict[str, torch.Tensor]]:
        """Override compute_loss to use diffusion loss."""
        input_ids = inputs.get("input_ids")
        attention_mask = inputs.get("attention_mask")
        labels = inputs.get("labels")
        position_ids = inputs.get("position_ids")

        if input_ids is None:
            raise ValueError("input_ids is required for diffusion training")

        loss, outputs = self._compute_diffusion_loss(
            model, input_ids, attention_mask, labels, position_ids
        )

        if return_outputs:
            return loss, outputs
        return loss

    def _cache_special_token_ids(self):
        """Cache special token IDs to avoid repeated tokenizer access."""
        if self.processing_class is None:
            self._special_token_ids = set()
            return

        tokenizer = self.processing_class
        special_tokens = set()

        if hasattr(tokenizer, "bos_token_id") and tokenizer.bos_token_id is not None:
            special_tokens.add(tokenizer.bos_token_id)
        if hasattr(tokenizer, "eos_token_id") and tokenizer.eos_token_id is not None:
            special_tokens.add(tokenizer.eos_token_id)
        if hasattr(tokenizer, "pad_token_id") and tokenizer.pad_token_id is not None:
            special_tokens.add(tokenizer.pad_token_id)

        self._special_token_ids = special_tokens

    @torch.compile
    def _forward_process(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        labels: torch.Tensor | None = None,
        eps: float = 1e-3,
        min_p: float = 0.0,
        max_p: float = 1.0,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Forward noising process. A timestep is sampled along the process, and tokens are
        masked with probability determined by the configured noise schedule.

        Args:
            input_ids: Input token ids [batch_size, seq_len].
            attention_mask: Attention mask [batch_size, seq_len].
            labels: Labels for SFT training [batch_size, seq_len].
            eps: Small epsilon value for minimum masking probability.

        Returns:
            noisy_batch: Input with some tokens masked.
            masked_indices: Boolean mask indicating which tokens were masked.
            p_mask: Masking probabilities for each token [batch_size, seq_len].
        """
        batch_size, seq_len = input_ids.shape
        device = input_ids.device

        # Sample random timesteps for each sample in batch
        t = torch.rand(batch_size, device=device)

        # Calculate masking probability with epsilon
        p_mask = min_p + (max_p - min_p) * (1 - eps) * t + eps  # [batch_size]
        p_mask = p_mask[:, None].repeat(1, seq_len)  # [batch_size, seq_len]

        # Don't mask padding tokens if attention_mask is provided
        if attention_mask is not None:
            valid_mask = attention_mask.bool()
            p_mask = p_mask * valid_mask.float()

        # Create mask to exclude special tokens
        special_token_mask = torch.zeros_like(input_ids, dtype=torch.bool)
        if self._special_token_ids:
            for token_id in self._special_token_ids:
                special_token_mask |= input_ids == token_id

        # Create random mask based on p_mask
        masked_indices = torch.rand((batch_size, seq_len), device=device) < p_mask
        masked_indices = masked_indices & ~special_token_mask
        if attention_mask is not None:
            masked_indices = masked_indices & attention_mask.bool()

        # For SFT data, only mask answer tokens
        if labels is not None:
            answer_mask = labels != -100
            masked_indices = masked_indices & answer_mask

        # Create masked input
        mask_token_id = self.config.mask_token_id
        noisy_batch = torch.where(masked_indices, mask_token_id, input_ids)

        return noisy_batch, masked_indices, p_mask

    @torch.compile
    def _create_bidirectional_attention_mask(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """
        Create bidirectional attention mask to override default causal masking. Handles
        sample-packed sequences where different samples are identified by different
        attention mask values.

        Args:
            input_ids: Input token ids [batch_size, seq_len].
            attention_mask: Attention mask [batch_size, seq_len].
            position_ids: Position ids [batch_size, seq_len].

        Returns:
            bidirectional_mask: 4D attention mask [batch_size, 1, seq_len, seq_len].
        """
        batch_size, seq_len = input_ids.shape
        device = input_ids.device

        if attention_mask is None or not self.config.sample_packing:
            return torch.ones(
                batch_size, 1, seq_len, seq_len, dtype=torch.bool, device=device
            )

        if position_ids is None:
            # Create attention mask by comparing sample IDs element-wise
            mask_i = attention_mask.unsqueeze(2)  # [batch_size, seq_len, 1]
            mask_j = attention_mask.unsqueeze(1)  # [batch_size, 1, seq_len]

            # Tokens can attend to each other if they have the same non-zero sample ID
            bidirectional_mask = (mask_i == mask_j) & (mask_i > 0)

            # Add head dimension: [batch_size, 1, seq_len, seq_len]
            bidirectional_mask = bidirectional_mask.unsqueeze(1)

            return bidirectional_mask

        if self.config.flex_attention:
            block_mask = create_bidirectional_block_mask(
                input_ids, attention_mask, position_ids
            )
        else:
            packed_seq_mask = find_packed_sequence_indices(position_ids)
            block_mask = packed_seq_mask.unsqueeze(2) == packed_seq_mask.unsqueeze(1)

        return block_mask

    def _compute_diffusion_loss(
        self,
        model: nn.Module,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        labels: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor | Any]:
        """
        Compute diffusion loss.

        Args:
            model: The model to compute loss for.
            input_ids: Ground truth token ids [batch_size, seq_len].
            attention_mask: Attention mask [batch_size, seq_len].
            labels: Labels for SFT training [batch_size, seq_len].
            position_ids: Position ids [batch_size, seq_len].

        Returns:
            loss: Cross-entropy loss.
            metrics: Dictionary of metrics.
        """
        # Apply forward process
        noisy_batch, masked_indices, p_mask = self._forward_process(
            input_ids,
            attention_mask,
            labels,
            self.config.eps,
            self.config.min_mask_ratio,
            self.config.max_mask_ratio,
        )

        # Create bidirectional attention mask (optional: use causal if you want strict
        # AR behavior)
        bidirectional_mask = self._create_bidirectional_attention_mask(
            input_ids, attention_mask, position_ids
        )

        # Forward pass
        outputs = model(
            input_ids=noisy_batch,
            attention_mask=bidirectional_mask,
        )
        logits = outputs.logits  # [B, L, V]

        # ----- AR label shift toggle -----
        use_ar_shift = False
        if use_ar_shift:
            # Predict token at t from logits at t-1: drop last logit step, drop first
            # target step
            logits_eff = logits[:, :-1, :]
            input_ids_eff = input_ids[:, 1:]
            masked_indices_eff = masked_indices[:, 1:]
            p_mask_eff = p_mask[:, 1:]
            labels_eff = labels[:, 1:] if labels is not None else None
        else:
            logits_eff = logits
            input_ids_eff = input_ids
            masked_indices_eff = masked_indices
            p_mask_eff = p_mask
            labels_eff = labels

        if masked_indices_eff.sum() > 0:
            valid_indices = torch.where(masked_indices_eff)
            batch_indices, seq_indices = valid_indices

            masked_logits = logits_eff[batch_indices, seq_indices]
            masked_targets = input_ids_eff[batch_indices, seq_indices]
            masked_p_mask = p_mask_eff[batch_indices, seq_indices]

            # Compute cross-entropy loss without reduction
            token_loss = F.cross_entropy(
                masked_logits.float(), masked_targets, reduction="none"
            )

            if self.config.importance_weighting:
                masked_p_mask = masked_p_mask.float().clamp_min(1e-6)
                weighted_loss = token_loss / masked_p_mask
            else:
                weighted_loss = token_loss

            # Final loss: sum weighted losses, normalize
            if labels_eff is not None:
                # For SFT data: normalize by answer length per sample
                answer_mask = labels_eff != -100
                answer_lengths = answer_mask.sum(dim=1).float()  # [batch_size]

                # Get batch indices for masked tokens
                masked_batch_indices = batch_indices

                # Sum losses per sample and divide by answer length
                loss_per_sample = torch.zeros(
                    input_ids.shape[0], device=input_ids.device
                )
                for i in range(input_ids.shape[0]):
                    sample_mask = masked_batch_indices == i
                    if sample_mask.any():
                        sample_loss = weighted_loss[sample_mask].sum()
                        loss_per_sample[i] = sample_loss / answer_lengths[i]

                loss = loss_per_sample.mean()
            else:
                # Original normalization for non-SFT data
                loss = weighted_loss.sum() / (input_ids.shape[0] * input_ids.shape[1])

            ce_loss = token_loss.mean()

            # Compute accuracy on masked tokens
            with torch.no_grad():
                pred_tokens = masked_logits.argmax(dim=-1)
                accuracy = (pred_tokens == masked_targets).float().mean()
        else:
            loss = torch.tensor(0.0, device=input_ids.device, requires_grad=True)
            accuracy = torch.tensor(0.0, device=input_ids.device)
            ce_loss = torch.tensor(0.0, device=input_ids.device)
            masked_p_mask = torch.tensor(1.0, device=input_ids.device)

            # Keep eff tensors around for metrics
            masked_indices_eff = masked_indices
            p_mask_eff = p_mask
            labels_eff = labels

        # Metrics (aligned to the effective tensors)
        if masked_indices_eff.any():
            avg_p = p_mask_eff[masked_indices_eff].float().mean().item()
            num_masked = int(masked_indices_eff.sum().item())
            mask_ratio = masked_indices_eff.float().mean().item()
        else:
            avg_p = 0.0
            num_masked = 0
            mask_ratio = 0.0

        metrics = {
            "loss": float(loss.detach()),
            "accuracy": float(accuracy.detach()),
            "mask_ratio": mask_ratio,
            "num_masked_tokens": (num_masked, "sum"),
            "avg_p_mask": avg_p,
            "ce_loss": float(ce_loss.detach()),
        }

        # SFT-specific metrics (aligned)
        if labels_eff is not None:
            answer_mask = labels_eff != -100
            metrics["answer_ratio"] = answer_mask.float().mean().item()
            metrics["avg_answer_length"] = answer_mask.sum(dim=1).float().mean().item()

        if self.config.importance_weighting:
            metrics["importance_weight_avg"] = (1.0 / masked_p_mask).mean().item()

        train_eval: Literal["train", "eval"] = "train" if model.training else "eval"
        self.store_metrics(metrics, train_eval=train_eval)

        return loss, outputs
```
src/axolotl/integrations/diffusion/utils.py (new file, 50 lines)
@@ -0,0 +1,50 @@
```python
import torch
from torch.nn.attention.flex_attention import BlockMask, create_block_mask
from transformers.masking_utils import (
    find_packed_sequence_indices,
    packed_sequence_mask_function,
)


def create_bidirectional_block_mask(
    input_ids: torch.Tensor,
    attention_mask: torch.Tensor | None = None,
    position_ids: torch.Tensor | None = None,
) -> "BlockMask":
    """
    Creates a bidirectional block mask for FlexAttention.

    Args:
        input_ids: Input token ids [batch_size, seq_len]
        attention_mask: Padding mask [batch_size, seq_len]

    Returns:
        BlockMask for bidirectional attention with padding
    """
    batch_size, seq_len = input_ids.shape

    if position_ids is not None:
        packed_seq_mask = find_packed_sequence_indices(position_ids)
        mask_fn = packed_sequence_mask_function(packed_seq_mask, batch_size, seq_len)
    elif attention_mask is None:
        # If no padding mask, all positions can attend to all positions
        def mask_fn(b, h, q_idx, kv_idx):
            # Always return True for bidirectional attention
            return True

    else:
        # Convert attention_mask to boolean if needed
        attention_mask = attention_mask.bool()

        def mask_fn(b, h, q_idx, kv_idx):
            # Both query and key positions must be valid (not padding)
            return attention_mask[b, q_idx] & attention_mask[b, kv_idx]

    # Create the block mask
    block_mask = create_block_mask(
        mask_fn,
        B=batch_size,
        H=None,  # Will be set by the attention layer
        Q_LEN=seq_len,
        KV_LEN=seq_len,
        device=input_ids.device,
        _compile=True,
    )

    return block_mask
```
```diff
@@ -57,7 +57,7 @@ class SpectrumPlugin:
     Spectrum Plugin to automatically generate unfrozen parameters based on SNR data.
     """

-    base_url = "https://raw.githubusercontent.com/cognitivecomputations/spectrum/main/model_snr_results/"
+    base_url = "https://raw.githubusercontent.com/QuixiAI/spectrum/main/model_snr_results/"
     base_path = "./model_snr_results/"
     snr_file_template = "snr_results_{model_name_slug}.json"

```
```diff
@@ -681,6 +681,23 @@ class ModelLoader:

         return hf_ds_cfg

+    def _load_model_from_config(self) -> PreTrainedModel:
+        """Load model with random initialization using from_config."""
+        if self.auto_model_loader in [AutoModelForCausalLM, AutoModelForVision2Seq]:
+            return self.auto_model_loader.from_config(config=self.model_config)
+        return self.auto_model_loader(config=self.model_config)
+
+    def _load_model_from_pretrained(self, model_loader_class=None) -> PreTrainedModel:
+        """Load model from pretrained weights."""
+        loader = model_loader_class or self.auto_model_loader
+        kwargs = {
+            **self.model_kwargs,
+            "config": self.model_config,
+            "trust_remote_code": self.cfg.trust_remote_code or False,
+            **self.model_kwargs,
+        }
+        return loader.from_pretrained(self.base_model, **kwargs)
+
     def _build_model(self) -> bool:
         """Load model, with load strategy depending on config."""
         skip_move_to_device = False
```
```diff
@@ -695,7 +712,8 @@ class ModelLoader:
         if self.is_fsdp_enabled:
             if self.cfg.fsdp_config.cpu_ram_efficient_loading:
                 skip_move_to_device = True
-                # Don't delete device_map for QLoRA + FSDP - it was set correctly in _set_device_map
+                # Don't delete device_map for QLoRA + FSDP - it was set correctly in
+                # _set_device_map
                 if (
                     "device_map" in self.model_kwargs
                     and not self.is_qlora_and_fsdp_enabled
```
```diff
@@ -724,6 +742,11 @@ class ModelLoader:
                 or self.cfg.qlora_sharded_model_loading
             )
         ):
+            if self.cfg.reinit_weights:
+                LOG.warning(
+                    "reinit_weights is not supported with sharded quantized loading. "
+                    "Loading from pretrained weights instead."
+                )
             quant_storage = self.cfg.torch_dtype
             quantization_config = getattr(
                 self.model_config, "quantization_config", None
```
```diff
@@ -739,33 +762,12 @@ class ModelLoader:
                 quantization_config=quantization_config,
             )
             skip_move_to_device = True
-        elif (
-            self.model_config.model_type in ["llama", "llama4"]
-            and not self.cfg.trust_remote_code
-            and not self.cfg.gptq
-        ):
-            # Please don't remove underscore binding without reading the fn docstring.
-            _ = self._configure_zero3_memory_efficient_loading()
-
-            # Load model with random initialization if specified
-            if self.cfg.random_init_weights:
-                # AutoModel classes support the from_config method
-                if self.auto_model_loader in [
-                    AutoModelForCausalLM,
-                    AutoModelForVision2Seq,
-                ]:
-                    self.model = self.auto_model_loader.from_config(
-                        config=self.model_config,
-                    )
-                else:
-                    self.model = self.auto_model_loader(config=self.model_config)
-            else:
-                self.model = self.auto_model_loader.from_pretrained(
-                    self.base_model,
-                    config=self.model_config,
-                    **self.model_kwargs,
-                )
         elif self.model_type == "MambaLMHeadModel":
+            if self.cfg.reinit_weights:
+                LOG.warning(
+                    "reinit_weights is not supported with MambaLMHeadModel. "
+                    "Loading from pretrained weights instead."
+                )
             # FIXME this is janky at best and hacked together to make it work
             MambaLMHeadModel = fix_mamba_attn_for_loss()  # pylint: disable=invalid-name

```
@@ -778,41 +780,27 @@ class ModelLoader:
                 self.base_model,
                 **self.model_kwargs,
             )
-        elif (
-            self.model_type
-            and self.model_type != "AutoModelForCausalLM"
-            and not self.cfg.trust_remote_code
-        ):
-            if self.cfg.gptq:
-                self.model = self.auto_model_loader.from_pretrained(
-                    self.base_model,
-                    config=self.model_config,
-                    trust_remote_code=self.cfg.trust_remote_code or False,
-                    **self.model_kwargs,
-                )
-            else:
-                self.model = getattr(transformers, self.model_type).from_pretrained(
-                    self.base_model,
-                    config=self.model_config,
-                    trust_remote_code=self.cfg.trust_remote_code or False,
-                    **self.model_kwargs,
-                )
-        elif self.cfg.gptq:
-            self.model = self.auto_model_loader.from_pretrained(
-                self.base_model,
-                config=self.model_config,
-                trust_remote_code=self.cfg.trust_remote_code or False,
-                **self.model_kwargs,
-            )
         else:
-            # Please don't remove underscore binding without reading the fn docstring.
+            # Please don't remove underscore binding without reading the fn docstring
             _ = self._configure_zero3_memory_efficient_loading()
-            self.model = self.auto_model_loader.from_pretrained(
-                self.base_model,
-                config=self.model_config,
-                trust_remote_code=self.cfg.trust_remote_code or False,
-                **self.model_kwargs,
-            )
+            if (
+                self.model_type
+                and self.model_type != "AutoModelForCausalLM"
+                and not self.cfg.trust_remote_code
+                and not self.cfg.gptq
+            ):
+                # Use model type from transformers
+                model_loader_class = getattr(transformers, self.model_type)
+            else:
+                # Use auto model loader (handles gptq and default cases)
+                model_loader_class = self.auto_model_loader
+
+            if self.cfg.reinit_weights:
+                self.model = self._load_model_from_config()
+            else:
+                self.model = self._load_model_from_pretrained(model_loader_class)
+
         if is_deepspeed_zero3_enabled():
             skip_move_to_device = True
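Note on the refactor above: the non-quantized, non-Mamba branches now dispatch to two new helpers, of which only the tail of `_load_model_from_pretrained` appears in this excerpt. A minimal sketch consistent with the removed branches follows; signatures and the kwargs contents are inferred, not the committed code.

    def _load_model_from_config(self):
        """Build the model from config alone, i.e. with randomly initialized weights."""
        # AutoModel classes support the from_config method
        if self.auto_model_loader in [AutoModelForCausalLM, AutoModelForVision2Seq]:
            return self.auto_model_loader.from_config(config=self.model_config)
        return self.auto_model_loader(config=self.model_config)

    def _load_model_from_pretrained(self, loader):
        """Load pretrained weights via whichever loader class was selected."""
        kwargs = {
            "config": self.model_config,
            "trust_remote_code": self.cfg.trust_remote_code or False,
            **self.model_kwargs,
        }
        return loader.from_pretrained(self.base_model, **kwargs)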
@@ -75,7 +75,7 @@ class PromptTokenizingStrategy(abc.ABC):
     ) -> BatchEncoding:
         empty = BatchEncoding(data={"input_ids": [], "attention_mask": []})
         if not prompt:
-            LOG.warning("Empty text requested for tokenization.")
+            LOG.warning_once("Empty text requested for tokenization.")
             return empty
 
         result = self.tokenizer(
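`warning_once` is not a stdlib `logging` method; transformers patches a cached variant onto `logging.Logger`, which is presumably what this call relies on, so a dataset full of empty rows logs a single line instead of one per row. A rough equivalent of that mechanism:

import functools
import logging

@functools.lru_cache(None)  # cache on (logger, msg) so each message fires once
def warning_once(logger: logging.Logger, msg: str) -> None:
    logger.warning(msg)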
@@ -16,7 +16,7 @@ from packaging.version import Version, parse
 def check_cuda_p2p_ib_support():
     if not accelerate_check_cuda_p2p_ib_support():
         return False
-    unsupported_devices = {"RTX 6000 Ada", "L40S"}
+    unsupported_devices = {"RTX 6000 Ada", "L40S", "A40"}
     try:
         device_names, device_count = get_gpu_info()
         if 1 < device_count < 8:
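This hunk adds the A40 to the cards treated as lacking P2P/IB support. How the return value is consumed is not shown in this diff; a hypothetical caller (import path assumed) would disable the NCCL transports when the check fails:

import os

from axolotl.utils.distributed import check_cuda_p2p_ib_support  # assumed path

if not check_cuda_p2p_ib_support():
    os.environ["NCCL_P2P_DISABLE"] = "1"  # fall back to PCIe copies
    os.environ["NCCL_IB_DISABLE"] = "1"   # avoid the InfiniBand transport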
@@ -109,6 +109,12 @@ class AxolotlInputConfig(
             "description": "Don't upcast the embeddings to float32 when using PEFT. Useful for low-VRAM GPUs"
         },
     )
+    reinit_weights: bool | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Reinitialize model weights randomly instead of loading pretrained weights"
+        },
+    )
+
     trainer_cls: str | None = Field(
         default=None,
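Usage sketch for the new top-level field, built programmatically the way the new tests below construct their configs (the other keys are illustrative):

from axolotl.utils.dict import DictDefault

cfg = DictDefault(
    {
        "base_model": "HuggingFaceTB/SmolLM2-135M",
        # New flag: start from randomly initialized weights instead of
        # loading the pretrained checkpoint.
        "reinit_weights": True,
    }
)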
119  tests/e2e/test_diffusion.py  Normal file
@@ -0,0 +1,119 @@
"""E2E smoke test for diffusion training plugin."""

from axolotl.common.datasets import load_datasets
from axolotl.train import train
from axolotl.utils.config import normalize_config, validate_config
from axolotl.utils.dict import DictDefault

from tests.e2e.utils import check_model_output_exists


class TestDiffusion:
    """Test case for diffusion training plugin."""

    def test_diffusion_smoke_test(self, temp_dir):
        """
        Smoke test for diffusion training to ensure the plugin loads and trains without
        error.
        """
        cfg = DictDefault(
            {
                "base_model": "HuggingFaceTB/SmolLM2-135M",
                "tokenizer_type": "AutoTokenizer",
                "trust_remote_code": True,
                "sequence_len": 256,
                "val_set_size": 0.1,
                "special_tokens": {
                    "pad_token": "<|endoftext|>",
                },
                "datasets": [
                    {
                        "path": "mhenrichsen/alpaca_2k_test",
                        "type": "alpaca",
                    },
                ],
                "num_epochs": 1,
                "max_steps": 3,
                "micro_batch_size": 1,
                "gradient_accumulation_steps": 1,
                "output_dir": temp_dir,
                "learning_rate": 0.0001,
                "optimizer": "adamw_torch",
                "lr_scheduler": "cosine",
                "bf16": True,
                "save_safetensors": True,
                "save_first_step": False,
                "logging_steps": 1,
                "eval_steps": 3,
                # Diffusion-specific config
                "plugins": ["axolotl.integrations.diffusion.DiffusionPlugin"],
                "diffusion_mask_token_id": 16,
                "diffusion_eps": 1e-3,
                "diffusion_importance_weighting": False,
            }
        )

        cfg = validate_config(cfg)
        normalize_config(cfg)
        dataset_meta = load_datasets(cfg=cfg)

        train(cfg=cfg, dataset_meta=dataset_meta)
        check_model_output_exists(temp_dir, cfg)

    def test_diffusion_sft_labels(self, temp_dir):
        """Test that diffusion training properly handles SFT data with labels."""
        cfg = DictDefault(
            {
                "base_model": "HuggingFaceTB/SmolLM2-135M",
                "tokenizer_type": "AutoTokenizer",
                "trust_remote_code": True,
                "sequence_len": 256,
                "val_set_size": 0.1,
                "special_tokens": {
                    "pad_token": "<|endoftext|>",
                },
                "datasets": [
                    {
                        "path": "mhenrichsen/alpaca_2k_test",
                        "type": "alpaca",
                    },
                ],
                "num_epochs": 1,
                "max_steps": 3,
                "micro_batch_size": 1,
                "gradient_accumulation_steps": 1,
                "output_dir": temp_dir,
                "learning_rate": 0.0001,
                "optimizer": "adamw_torch",
                "lr_scheduler": "cosine",
                "bf16": True,
                "save_safetensors": True,
                "save_first_step": False,
                "logging_steps": 1,
                "eval_steps": 2,
                # Diffusion-specific config
                "plugins": ["axolotl.integrations.diffusion.DiffusionPlugin"],
                "diffusion_mask_token_id": 16,
                "diffusion_eps": 1e-3,
                "diffusion_importance_weighting": True,
                # Ensure we have proper SFT labels
                "train_on_inputs": False,
            }
        )

        cfg = validate_config(cfg)
        normalize_config(cfg)
        dataset_meta = load_datasets(cfg=cfg)

        # Verify that the dataset has labels
        sample = dataset_meta.train_dataset[0]
        assert "labels" in sample, "SFT dataset should have labels"

        # Check that some labels are -100 (prompt tokens)
        labels = sample["labels"]
        if hasattr(labels, "tolist"):
            labels = labels.tolist()
        assert -100 in labels, "SFT dataset should have -100 labels for prompt tokens"

        train(cfg=cfg, dataset_meta=dataset_meta)
        check_model_output_exists(temp_dir, cfg)
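The `-100` checked above is the standard Hugging Face ignore-index: prompt tokens are labeled `-100` so they are excluded from the loss. A tiny standalone illustration (hypothetical helper, not part of this diff):

import torch

def answer_mask(labels: torch.Tensor) -> torch.Tensor:
    """True where a token contributes to the loss (label != -100)."""
    return labels != -100

labels = torch.tensor([[-100, -100, 20, 30, 2]])
assert answer_mask(labels).sum().item() == 3  # three answer tokens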
271  tests/integrations/test_diffusion.py  Normal file
@@ -0,0 +1,271 @@
"""Tests for diffusion trainer integration."""

# pylint: disable=redefined-outer-name,protected-access

from unittest.mock import Mock

import pytest
import torch

from axolotl.integrations.diffusion.trainer import DiffusionTrainer
from axolotl.utils.dict import DictDefault


@pytest.fixture
def mock_tokenizer():
    """Create a mock tokenizer."""
    tokenizer = Mock()
    tokenizer.bos_token_id = 1
    tokenizer.eos_token_id = 2
    tokenizer.pad_token_id = 0
    return tokenizer


@pytest.fixture
def diffusion_config():
    """Create a diffusion config."""
    return DictDefault(
        {
            "mask_token_id": 32000,
            "eps": 1e-3,
            "importance_weighting": False,
            "sample_packing": False,
        }
    )


@pytest.fixture
def diffusion_trainer_instance(mock_tokenizer, diffusion_config):
    """Create a diffusion trainer instance for testing methods directly."""
    # Create a minimal trainer instance just for testing methods
    trainer = object.__new__(DiffusionTrainer)  # Bypass __init__
    trainer._config = diffusion_config  # the tests below read _config directly
    trainer._special_token_ids = {0, 1, 2}  # pad, bos, eos
    trainer.processing_class = mock_tokenizer
    trainer.store_metrics = Mock()  # Mock metrics storage
    return trainer


class TestDiffusionTrainer:
    """Test the DiffusionTrainer class."""

    def test_forward_process_basic(self, diffusion_trainer_instance):
        """Test basic forward process without labels."""
        input_ids = torch.tensor([[1, 10, 20, 30, 2]], dtype=torch.long)

        noisy_batch, masked_indices, p_mask = (
            diffusion_trainer_instance._forward_process(input_ids, eps=0.1)
        )

        # Check shapes
        assert noisy_batch.shape == input_ids.shape
        assert masked_indices.shape == input_ids.shape
        assert p_mask.shape == input_ids.shape

        # Check that special tokens are not masked
        special_token_positions = (input_ids == 1) | (input_ids == 2) | (input_ids == 0)
        assert not masked_indices[special_token_positions].any()

        # Check that mask token is applied
        mask_token_id = diffusion_trainer_instance._config.mask_token_id
        masked_positions = masked_indices
        if masked_positions.any():
            assert (noisy_batch[masked_positions] == mask_token_id).all()

    def test_forward_process_with_labels(self, diffusion_trainer_instance):
        """Test forward process with SFT labels."""
        input_ids = torch.tensor([[1, 10, 20, 30, 2]], dtype=torch.long)
        labels = torch.tensor([[-100, -100, 20, 30, 2]], dtype=torch.long)

        noisy_batch, masked_indices, p_mask = (
            diffusion_trainer_instance._forward_process(
                input_ids, labels=labels, eps=0.1
            )
        )

        # Check shapes
        assert noisy_batch.shape == input_ids.shape
        assert masked_indices.shape == input_ids.shape
        assert p_mask.shape == input_ids.shape

        # Check that only answer tokens can be masked (where labels != -100)
        non_answer_mask = labels == -100

        # No masking should occur on non-answer tokens
        assert not masked_indices[non_answer_mask].any()

        # p_mask should be the same for all positions (sampled timestep),
        # but masking is only applied to answer tokens
        assert p_mask.shape == input_ids.shape
        # Verify that masked_indices respects the answer mask
        assert not masked_indices[non_answer_mask].any()

    def test_forward_process_with_attention_mask(self, diffusion_trainer_instance):
        """Test forward process with attention mask."""
        input_ids = torch.tensor([[1, 10, 20, 0]], dtype=torch.long)
        attention_mask = torch.tensor([[1, 1, 1, 0]], dtype=torch.long)

        _, masked_indices, p_mask = diffusion_trainer_instance._forward_process(
            input_ids, attention_mask=attention_mask, eps=0.1
        )

        # Check that padding tokens are not masked
        padding_positions = attention_mask == 0
        assert not masked_indices[padding_positions].any()
        assert (p_mask[padding_positions] == 0).all()

    def test_bidirectional_attention_mask_no_packing(self, diffusion_trainer_instance):
        """Test bidirectional attention mask without sample packing."""
        input_ids = torch.tensor([[1, 10, 20, 2]], dtype=torch.long)

        mask = diffusion_trainer_instance._create_bidirectional_attention_mask(
            input_ids
        )

        # Should be all-to-all attention
        expected_shape = (1, 1, 4, 4)
        assert mask.shape == expected_shape
        assert mask.all()

    def test_bidirectional_attention_mask_with_packing(
        self, diffusion_trainer_instance
    ):
        """Test bidirectional attention mask with sample packing."""
        diffusion_trainer_instance._config.sample_packing = True
        input_ids = torch.tensor([[1, 10, 20, 30, 40, 2]], dtype=torch.long)
        # Sample IDs: first sample (1), second sample (2)
        attention_mask = torch.tensor([[1, 1, 1, 2, 2, 2]], dtype=torch.long)

        mask = diffusion_trainer_instance._create_bidirectional_attention_mask(
            input_ids, attention_mask
        )

        # Check that tokens within same sample can attend to each other
        # but not across samples
        assert mask[0, 0, 0, 1].item()  # First sample tokens can attend to each other
        assert mask[0, 0, 1, 2].item()
        assert not mask[0, 0, 0, 3].item()  # Can't attend across samples
        assert not mask[0, 0, 2, 4].item()
        assert mask[0, 0, 3, 4].item()  # Second sample tokens can attend to each other

    def test_compute_loss_basic(self, diffusion_trainer_instance):
        """Test basic loss computation."""
        # Mock model that returns logits
        mock_model = Mock()
        mock_outputs = Mock()
        vocab_size = 1000
        seq_len = 5
        mock_outputs.logits = torch.randn(1, seq_len, vocab_size, requires_grad=True)
        mock_model.return_value = mock_outputs
        mock_model.training = True

        input_ids = torch.tensor([[1, 10, 20, 30, 2]], dtype=torch.long)

        loss, outputs = diffusion_trainer_instance._compute_diffusion_loss(
            mock_model, input_ids
        )

        # Check that loss is computed
        assert isinstance(loss, torch.Tensor)
        assert loss.requires_grad
        assert outputs == mock_outputs

        # Check that metrics were stored
        diffusion_trainer_instance.store_metrics.assert_called_once()

    def test_compute_loss_with_labels(self, diffusion_trainer_instance):
        """Test loss computation with SFT labels."""
        # Mock model
        mock_model = Mock()
        mock_outputs = Mock()
        vocab_size = 1000
        seq_len = 5
        mock_outputs.logits = torch.randn(1, seq_len, vocab_size, requires_grad=True)
        mock_model.return_value = mock_outputs
        mock_model.training = True

        input_ids = torch.tensor([[1, 10, 20, 30, 2]], dtype=torch.long)
        labels = torch.tensor([[-100, -100, 20, 30, 2]], dtype=torch.long)

        loss, _ = diffusion_trainer_instance._compute_diffusion_loss(
            mock_model, input_ids, labels=labels
        )

        # Check that loss is computed
        assert isinstance(loss, torch.Tensor)
        assert loss.requires_grad

        # Check that SFT metrics were added
        call_args = diffusion_trainer_instance.store_metrics.call_args[0][0]
        assert "answer_ratio" in call_args
        assert "avg_answer_length" in call_args

    def test_compute_loss_no_masked_tokens(self, diffusion_trainer_instance):
        """Test loss computation when no tokens are masked."""
        # Mock model
        mock_model = Mock()
        mock_outputs = Mock()
        vocab_size = 1000
        seq_len = 3
        mock_outputs.logits = torch.randn(1, seq_len, vocab_size)
        mock_model.return_value = mock_outputs
        mock_model.training = True

        # Only special tokens (which won't be masked)
        input_ids = torch.tensor([[1, 0, 2]], dtype=torch.long)

        loss, _ = diffusion_trainer_instance._compute_diffusion_loss(
            mock_model, input_ids
        )

        # Loss should be zero when no tokens are masked
        assert loss.item() == 0.0
        assert loss.requires_grad

    def test_cache_special_token_ids(self, diffusion_trainer_instance):
        """Test caching of special token IDs."""
        # Should cache BOS, EOS, PAD tokens
        expected_tokens = {0, 1, 2}  # pad, bos, eos
        assert diffusion_trainer_instance._special_token_ids == expected_tokens

    def test_cache_special_token_ids_no_tokenizer(self):
        """Test caching when no tokenizer is available."""
        trainer = object.__new__(DiffusionTrainer)  # Bypass __init__
        trainer.processing_class = None
        trainer._cache_special_token_ids()

        assert trainer._special_token_ids == set()

    def test_main_compute_loss_interface(self, diffusion_trainer_instance):
        """Test the main compute_loss interface."""
        # Mock model
        mock_model = Mock()
        mock_outputs = Mock()
        mock_outputs.logits = torch.randn(1, 5, 1000)
        mock_model.return_value = mock_outputs
        mock_model.training = True

        inputs = {
            "input_ids": torch.tensor([[1, 10, 20, 30, 2]], dtype=torch.long),
            "attention_mask": torch.tensor([[1, 1, 1, 1, 1]], dtype=torch.long),
            "labels": torch.tensor([[-100, -100, 20, 30, 2]], dtype=torch.long),
        }

        # Test without return_outputs
        loss = diffusion_trainer_instance.compute_loss(mock_model, inputs)
        assert isinstance(loss, torch.Tensor)

        # Test with return_outputs
        loss, outputs = diffusion_trainer_instance.compute_loss(
            mock_model, inputs, return_outputs=True
        )
        assert isinstance(loss, torch.Tensor)
        assert outputs == mock_outputs

    def test_missing_input_ids_raises_error(self, diffusion_trainer_instance):
        """Test that missing input_ids raises ValueError."""
        mock_model = Mock()
        inputs = {"attention_mask": torch.tensor([[1, 1, 1]])}

        with pytest.raises(ValueError, match="input_ids is required"):
            diffusion_trainer_instance.compute_loss(mock_model, inputs)
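Taken together, these tests pin down two contracts: `_forward_process` samples a per-sequence masking probability in `[eps, 1 - eps]`, never masks special or padding tokens, and restricts masking to answer tokens when labels are present; `_create_bidirectional_attention_mask` returns a boolean `(batch, 1, seq, seq)` mask that is all-true without packing and block-diagonal by sample id with it. A minimal reimplementation satisfying those assertions (an assumption-level sketch, not the plugin's code):

import torch

def forward_process(
    input_ids, mask_token_id, special_token_ids, eps=1e-3,
    labels=None, attention_mask=None,
):
    batch, seq_len = input_ids.shape
    t = torch.rand(batch, 1)  # one timestep per sequence
    p_mask = (eps + (1 - 2 * eps) * t).expand(batch, seq_len).clone()

    maskable = torch.ones_like(input_ids, dtype=torch.bool)
    for tok in special_token_ids:  # never mask pad/bos/eos
        maskable &= input_ids != tok
    if labels is not None:  # SFT: only answer tokens (label != -100)
        maskable &= labels != -100
    if attention_mask is not None:  # padding is never masked; its p_mask is 0
        maskable &= attention_mask != 0
        p_mask = p_mask * (attention_mask != 0)

    masked_indices = (torch.rand(batch, seq_len) < p_mask) & maskable
    noisy_batch = torch.where(
        masked_indices, torch.full_like(input_ids, mask_token_id), input_ids
    )
    return noisy_batch, masked_indices, p_mask

def bidirectional_attention_mask(input_ids, attention_mask=None):
    batch, seq_len = input_ids.shape
    if attention_mask is None:
        # No packing: full all-to-all attention.
        return torch.ones(batch, 1, seq_len, seq_len, dtype=torch.bool)
    ids = attention_mask  # per-token sample ids; 0 marks padding
    same_sample = ids.unsqueeze(1) == ids.unsqueeze(2)  # (batch, seq, seq)
    not_pad = (ids != 0).unsqueeze(1) & (ids != 0).unsqueeze(2)
    return (same_sample & not_pad).unsqueeze(1)  # (batch, 1, seq, seq)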