Compare commits: dft ... 6e42def14b (4 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 6e42def14b | |
| | c413480b35 | |
| | 8f25124269 | |
| | 790df757cb | |
.github/workflows/multi-gpu-e2e.yml (vendored, 3 lines changed)

```diff
@@ -47,7 +47,8 @@ jobs:
           cuda_version: 13.0.0
           python_version: "3.11"
           pytorch: 2.9.1
-          axolotl_extras: fbgemm-gpu
+          axolotl_extras:
+          # axolotl_extras: fbgemm-gpu
           num_gpus: 2
           nightly_build: "true"
     runs-on: [self-hosted, modal]
```
```diff
@@ -1,53 +0,0 @@
-base_model: google/gemma-3-1b-it
-
-model_type: Gemma3ForCausalLM
-cls_model_config: Gemma3TextConfig
-
-# gemma3 doesn't seem to play nice with ddp
-ddp_find_unused_parameters: true
-
-chat_template: gemma3
-eot_tokens:
-  - <end_of_turn>
-datasets:
-  - path: cgato/SlimOrcaDedupCleaned
-    type: chat_template
-    field_messages: conversations
-    message_property_mappings:
-      role: from
-      content: value
-
-val_set_size: 0.05
-output_dir: ./outputs/gemma-3-1b-fft-dft
-
-sequence_len: 2048
-
-use_dynamic_finetuning: true
-
-wandb_project:
-wandb_entity:
-wandb_watch:
-wandb_name:
-wandb_log_model:
-
-gradient_accumulation_steps: 4
-micro_batch_size: 2
-num_epochs: 1
-optimizer: adamw_torch_fused
-lr_scheduler: cosine
-learning_rate: 5e-5
-
-bf16: auto
-tf32: true
-
-gradient_checkpointing: true
-gradient_checkpointing_kwargs:
-  use_reentrant: false
-resume_from_checkpoint:
-logging_steps: 1
-flash_attention: true
-
-warmup_ratio: 0.1
-evals_per_epoch: 2
-saves_per_epoch: 1
-weight_decay: 0.0
```
```diff
@@ -11,11 +11,11 @@ liger-kernel==0.6.4
 packaging==23.2

 huggingface_hub>=0.36.0
-peft>=0.18.0
+peft>=0.18.1
 tokenizers>=0.22.1
-transformers==4.57.1
+transformers==4.57.6
 accelerate==1.12.0
-datasets==4.4.2
+datasets==4.5.0
 deepspeed>=0.18.3
 trl==0.25.1
 hf_xet==1.2.0
```
setup.py (48 lines changed)

```diff
@@ -26,6 +26,7 @@ def parse_requirements(extras_require_map):
             _install_requires.append(line)
     try:
         xformers_version = [req for req in _install_requires if "xformers" in req][0]
+        install_xformers = platform.machine() != "aarch64"
         if "Darwin" in platform.system():
             # skip packages not compatible with OSX
             skip_packages = [
@@ -62,44 +63,63 @@ def parse_requirements(extras_require_map):
         else:
             raise ValueError("Invalid version format")

         torch_parts = torch_version.split("+")
         if len(torch_parts) == 2:
             torch_cuda_version = torch_parts[1]
             _dependency_links.append(
                 f"https://download.pytorch.org/whl/{torch_cuda_version}"
             )

         if (major, minor) >= (2, 9):
             extras_require_map.pop("fbgemm-gpu")
-            extras_require_map["fbgemm-gpu"] = ["fbgemm-gpu-genai==1.4.1"]
+            extras_require_map["fbgemm-gpu"] = [
+                "fbgemm-gpu==1.4.0",
+                "fbgemm-gpu-genai==1.4.2",
+            ]
             extras_require_map["vllm"] = ["vllm==0.11.1"]
+            if not install_xformers:
+                _install_requires.pop(_install_requires.index(xformers_version))
         elif (major, minor) >= (2, 8):
             extras_require_map.pop("fbgemm-gpu")
             extras_require_map["fbgemm-gpu"] = ["fbgemm-gpu-genai==1.3.0"]
             extras_require_map["vllm"] = ["vllm==0.11.0"]
+            if not install_xformers:
+                _install_requires.pop(_install_requires.index(xformers_version))
         elif (major, minor) >= (2, 7):
             _install_requires.pop(_install_requires.index(xformers_version))
             if patch == 0:
-                _install_requires.append("xformers==0.0.30")
+                if install_xformers:
+                    _install_requires.append("xformers==0.0.30")
                 # vllm 0.9.x is incompatible with latest transformers
                 extras_require_map.pop("vllm")
             else:
-                _install_requires.append("xformers==0.0.31")
+                if install_xformers:
+                    _install_requires.append("xformers==0.0.31")
                 extras_require_map["vllm"] = ["vllm==0.10.1"]
         elif (major, minor) >= (2, 6):
             _install_requires.pop(_install_requires.index(xformers_version))
-            _install_requires.append("xformers==0.0.29.post3")
+            if install_xformers:
+                _install_requires.append("xformers==0.0.29.post3")
             # since we only support 2.6.0+cu126
             _dependency_links.append("https://download.pytorch.org/whl/cu126")
             extras_require_map.pop("vllm")
         elif (major, minor) >= (2, 5):
             _install_requires.pop(_install_requires.index(xformers_version))
-            if patch == 0:
-                _install_requires.append("xformers==0.0.28.post2")
-            else:
-                _install_requires.append("xformers>=0.0.28.post3")
+            if install_xformers:
+                if patch == 0:
+                    _install_requires.append("xformers==0.0.28.post2")
+                else:
+                    _install_requires.append("xformers>=0.0.28.post3")
             extras_require_map.pop("vllm")
         elif (major, minor) >= (2, 4):
             extras_require_map.pop("vllm")
-            if patch == 0:
-                _install_requires.pop(_install_requires.index(xformers_version))
-                _install_requires.append("xformers>=0.0.27")
-            else:
-                _install_requires.pop(_install_requires.index(xformers_version))
-                _install_requires.append("xformers==0.0.28.post1")
+            if install_xformers:
+                if patch == 0:
+                    _install_requires.pop(_install_requires.index(xformers_version))
+                    _install_requires.append("xformers>=0.0.27")
+                else:
+                    _install_requires.pop(_install_requires.index(xformers_version))
+                    _install_requires.append("xformers==0.0.28.post1")
         else:
             raise ValueError("axolotl requires torch>=2.4")

```
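The substance of the setup.py change above is a single `install_xformers` flag, computed once from `platform.machine()` and then consulted before every xformers pin, so aarch64 builds skip the dependency entirely. A minimal standalone sketch of that gating pattern; the requirement strings below are illustrative, not the real setup.py contents:

```python
# Sketch of the gating pattern the diff introduces: compute a platform flag
# once, then drop the xformers pin entirely on aarch64 builds.
# The requirement list is a toy placeholder, not axolotl's actual one.
import platform

install_requires = ["torch==2.9.1", "xformers==0.0.31"]

install_xformers = platform.machine() != "aarch64"
if not install_xformers:
    # no suitable xformers wheels on aarch64, so remove the pin
    xformers_req = [r for r in install_requires if "xformers" in r][0]
    install_requires.remove(xformers_req)

print(install_requires)
```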
```diff
@@ -4,4 +4,4 @@ import pkgutil

 __path__ = pkgutil.extend_path(__path__, __name__)  # Make this a namespace package

-__version__ = "0.13.0.dev"
+__version__ = "0.13.1"
```
```diff
@@ -373,11 +373,6 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
             # https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html
             data_collator_kwargs["pad_to_multiple_of"] = multiple

-        if self.cfg.use_dynamic_finetuning:
-            from axolotl.monkeypatch.loss.dft import dft_loss
-
-            trainer_kwargs["compute_loss_func"] = dft_loss
-
         trainer_cls = self._get_trainer_cls()

         trainer_kwargs, trainer_cls = self.hook_pre_create_trainer(
```
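For context on what the removed block did: `compute_loss_func` is the `transformers.Trainer` hook the DFT patch attached its loss to. A minimal sketch of wiring a custom loss through that hook, using a toy shifted cross-entropy; the commented-out `Trainer(...)` construction and all names in it are placeholders, not code from this repo:

```python
# Minimal sketch: plugging a custom loss into transformers' Trainer via
# compute_loss_func, the same hook the removed DFT patch used.
import torch
import torch.nn.functional as F


def toy_loss(outputs, labels, num_items_in_batch=None):
    """Plain shifted cross-entropy, matching the compute_loss_func signature."""
    # Shift so that tokens < n predict token n
    logits = outputs.logits[..., :-1, :].contiguous()
    shift_labels = labels[..., 1:].contiguous().to(logits.device)
    loss_sum = F.cross_entropy(
        logits.view(-1, logits.size(-1)),
        shift_labels.view(-1),
        ignore_index=-100,
        reduction="sum",
    )
    # Normalize by the accumulated batch's token count when the Trainer provides it
    denom = (
        num_items_in_batch
        if num_items_in_batch is not None
        else (shift_labels != -100).sum()
    )
    return loss_sum / denom


# Illustrative wiring (model/args/dataset are placeholders):
# trainer = Trainer(model=model, args=args, train_dataset=ds,
#                   compute_loss_func=toy_loss)
```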
```diff
@@ -1,98 +0,0 @@
-"""Dynamic Fine-Tuning (DFT) loss implementation"""
-
-from typing import Optional
-
-import torch
-import torch.nn.functional as F
-
-
-def selective_log_softmax(logits, index):
-    """Memory-efficient log_softmax -> gather"""
-    if logits.dtype in [torch.float32, torch.float64]:
-        selected_logits = torch.gather(
-            logits, dim=-1, index=index.unsqueeze(-1)
-        ).squeeze(-1)
-        logsumexp_values = torch.stack([torch.logsumexp(lg, dim=-1) for lg in logits])
-        per_token_logps = selected_logits - logsumexp_values
-    else:
-        per_token_logps = []
-        for row_logits, row_labels in zip(logits, index, strict=True):
-            row_logps = F.log_softmax(row_logits, dim=-1)
-            row_per_token_logps = row_logps.gather(
-                dim=-1, index=row_labels.unsqueeze(-1)
-            ).squeeze(-1)
-            per_token_logps.append(row_per_token_logps)
-        per_token_logps = torch.stack(per_token_logps)
-    return per_token_logps
-
-
-def get_dft_loss(ignore_index: int = -100):
-    """Creates DFT loss function"""
-
-    def for_causal_lm_dft_loss(
-        logits,
-        labels,
-        vocab_size: int = None,
-        num_items_in_batch: Optional[int] = None,
-        ignore_index: int = -100,
-        shift_labels: Optional[torch.Tensor] = None,
-        **kwargs,
-    ) -> torch.Tensor:
-        """DFT loss: -exp(logprobs).detach() * logprobs"""
-        if shift_labels is None:
-            # Shift so that tokens < n predict n
-            labels = F.pad(labels, (0, 1), value=ignore_index)
-            shift_labels = labels[..., 1:].contiguous()
-
-        shift_labels = shift_labels.to(logits.device)
-
-        # Create loss mask
-        loss_mask = shift_labels != ignore_index
-        shift_labels_masked = shift_labels.clone()
-        shift_labels_masked[~loss_mask] = 0
-
-        # Compute log probabilities
-        logprobs = selective_log_softmax(logits, shift_labels_masked)
-
-        # DFT loss: -exp(logprobs).detach() * logprobs
-        per_token_loss = -logprobs.exp().detach() * logprobs
-
-        # Sum over valid tokens and normalize
-        if num_items_in_batch is None:
-            num_items_in_batch = loss_mask.sum()
-
-        loss = (per_token_loss * loss_mask).sum() / num_items_in_batch
-        return loss
-
-    return for_causal_lm_dft_loss
-
-
-def dft_loss(outputs, labels, num_items_in_batch=None):
-    """DFT loss compatible with Trainer.compute_loss_func signature.
-
-    This function is designed to be passed to Trainer's compute_loss_func parameter.
-    """
-    ignore_index = -100
-
-    # Shift labels for causal LM
-    labels = F.pad(labels, (0, 1), value=ignore_index)
-    shift_labels = labels[..., 1:].contiguous()
-    shift_labels = shift_labels.to(outputs.logits.device)
-
-    # Create loss mask
-    loss_mask = shift_labels != ignore_index
-    shift_labels_masked = shift_labels.clone()
-    shift_labels_masked[~loss_mask] = 0
-
-    # Compute log probabilities
-    logprobs = selective_log_softmax(outputs.logits, shift_labels_masked)
-
-    # DFT loss: -exp(logprobs).detach() * logprobs
-    per_token_loss = -logprobs.exp().detach() * logprobs
-
-    # Sum over valid tokens and normalize
-    if num_items_in_batch is None:
-        num_items_in_batch = loss_mask.sum()
-
-    loss = (per_token_loss * loss_mask).sum() / num_items_in_batch
-    return loss
```
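The deleted module (imported above as `axolotl.monkeypatch.loss.dft`) computes, per token, `-exp(logprob).detach() * logprob`, i.e. the standard cross-entropy term `-log p` scaled by the detached probability `p` of the target token, so tokens the model assigns low probability are downweighted relative to plain cross-entropy. A small self-contained illustration of that reweighting, independent of the deleted code; the random tensors are toy data only:

```python
# Toy illustration of the removed DFT objective: per-token loss is
# p * (-log p), i.e. cross-entropy reweighted by the detached target
# probability p, rather than plain -log p.
import torch
import torch.nn.functional as F

torch.manual_seed(0)
batch, seq, vocab = 2, 5, 11
logits = torch.randn(batch, seq, vocab, requires_grad=True)
labels = torch.randint(0, vocab, (batch, seq))

# Log-probability of each target token
logprobs = F.log_softmax(logits, dim=-1).gather(-1, labels.unsqueeze(-1)).squeeze(-1)

ce_per_token = -logprobs                             # ordinary cross-entropy
dft_per_token = -logprobs.exp().detach() * logprobs  # DFT: p * (-log p)

# The weight p lies in (0, 1), so the DFT loss never exceeds plain CE,
# and the gradient still flows only through logprobs (p is detached).
assert (dft_per_token <= ce_per_token + 1e-6).all()
print("CE :", ce_per_token.mean().item())
print("DFT:", dft_per_token.mean().item())
```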
```diff
@@ -676,10 +676,6 @@ class AxolotlInputConfig(
             "description": "Number of chunks to use for chunked cross entropy loss"
         },
     )
-    use_dynamic_finetuning: bool | None = Field(
-        default=None,
-        json_schema_extra={"description": "Enable Dynamic Fine-Tuning loss (DFT)"},
-    )

     tiled_mlp: bool | None = Field(
         default=None,
```