Compare commits

..

3 Commits

Author     SHA1        Message                                                              Date
Wing Lian  efa1209a92  add smoke test training                                              2024-10-30 15:40:27 -04:00
Wing Lian  67b9e31bbc  make sure to set alternate optimizer and set lr and eps from adam    2024-10-30 15:33:37 -04:00
Wing Lian  ad60916323  add soap optimizer support                                           2024-10-30 15:33:37 -04:00
22 changed files with 740 additions and 517 deletions

View File

@@ -40,7 +40,7 @@ jobs:
cuda_version: 12.4.1
cudnn_version: ""
python_version: "3.11"
pytorch: 2.5.1
pytorch: 2.5.0
torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
steps:
- name: Checkout

View File

@@ -82,6 +82,13 @@ jobs:
num_gpus: 1
axolotl_extras: mamba-ssm
nightly_build: "true"
- cuda: 121
cuda_version: 12.1.1
python_version: "3.11"
pytorch: 2.3.1
num_gpus: 1
axolotl_extras: mamba-ssm
nightly_build: "true"
- cuda: 124
cuda_version: 12.4.1
python_version: "3.11"

View File

@@ -72,52 +72,12 @@ jobs:
run: |
find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
docker-e2e-tests-1st:
if: github.repository_owner == 'axolotl-ai-cloud'
# this job needs to be run on self-hosted GPU runners...
runs-on: [self-hosted, modal]
timeout-minutes: 90
needs: [pre-commit, pytest]
strategy:
fail-fast: false
matrix:
include:
- cuda: 124
cuda_version: 12.4.1
python_version: "3.11"
pytorch: 2.4.1
num_gpus: 1
axolotl_extras:
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install Modal
run: |
python -m pip install --upgrade pip
pip install modal==0.63.64 jinja2
- name: Update env vars
run: |
echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
echo "PYTORCH_VERSION=${{ matrix.pytorch}}" >> $GITHUB_ENV
echo "AXOLOTL_ARGS=${{ matrix.axolotl_args}}" >> $GITHUB_ENV
echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
- name: Run tests job on Modal
run: |
modal run cicd.tests
docker-e2e-tests:
if: github.repository_owner == 'axolotl-ai-cloud'
# this job needs to be run on self-hosted GPU runners...
runs-on: [self-hosted, modal]
timeout-minutes: 90
needs: [pre-commit, pytest, docker-e2e-tests-1st]
needs: [pre-commit, pytest]
strategy:
fail-fast: false
@@ -129,6 +89,18 @@ jobs:
pytorch: 2.3.1
num_gpus: 1
axolotl_extras: mamba-ssm
- cuda: 121
cuda_version: 12.1.1
python_version: "3.11"
pytorch: 2.3.1
num_gpus: 1
axolotl_extras: mamba-ssm
- cuda: 124
cuda_version: 12.4.1
python_version: "3.11"
pytorch: 2.4.1
num_gpus: 1
axolotl_extras:
- cuda: 124
cuda_version: 12.4.1
python_version: "3.11"

View File

@@ -562,8 +562,7 @@ plugins:
- axolotl.integrations.liger.LigerPlugin
liger_rope: true
liger_rms_norm: true
liger_glu_activation: true
liger_layer_norm: true
liger_swiglu: true
liger_fused_linear_cross_entropy: true
```

View File

@@ -35,7 +35,3 @@ RUN git lfs install --skip-repo && \
pip3 install awscli && \
# The base image ships with `pydantic==1.8.2` which is not working
pip3 install -U --no-cache-dir pydantic==1.10.10
RUN if [ "$PYTHON_VERSION" != "2.5.1" ] ; then \
pip3 install flash-attn==2.6.3; \
fi

View File

@@ -9,7 +9,7 @@ strict: false
plugins:
- axolotl.integrations.liger.LigerPlugin
liger_rms_norm: true
liger_glu_activation: true
liger_swiglu: true
liger_fused_linear_cross_entropy: true
chat_template: deepseek_v2

View File

@@ -4,7 +4,7 @@ plugins:
- axolotl.integrations.liger.LigerPlugin
liger_rope: true
liger_rms_norm: true
liger_glu_activation: true
liger_swiglu: true
liger_fused_linear_cross_entropy: true
strict: false

View File

@@ -1,10 +1,10 @@
--extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/
packaging==23.2
peft==0.13.2
transformers==4.46.1
transformers==4.46.0
tokenizers>=0.20.1
bitsandbytes==0.44.1
accelerate==1.1.0
accelerate==1.0.1
datasets==3.0.1
deepspeed==0.15.3
pydantic==2.6.3
@@ -34,7 +34,7 @@ tensorboard
python-dotenv==1.0.1
autoawq>=0.2.5
triton>=2.3.0
liger-kernel==0.4.0
liger-kernel==0.3.0
mamba-ssm==1.2.0.post1

View File

@@ -48,7 +48,6 @@ from trl import (
)
from trl.trainer.utils import RewardDataCollatorWithPadding, pad_to_length
from axolotl.integrations.base import PluginManager
from axolotl.monkeypatch.multipack import SUPPORTED_MULTIPACK_MODEL_TYPES
from axolotl.monkeypatch.relora import ReLoRACallback, ReLoRAScheduler
from axolotl.utils import is_comet_available, is_mlflow_available
@@ -436,7 +435,13 @@ class AxolotlTrainer(SchedulerMixin, Trainer):
if (
self.args.loraplus_lr_ratio is None
and self.args.alternate_optimizer
not in ["optimi_adamw", "ao_adamw_8bit", "ao_adamw_4bit", "ao_adamw_fp8"]
not in [
"optimi_adamw",
"ao_adamw_8bit",
"ao_adamw_4bit",
"ao_adamw_fp8",
"soap",
]
):
return super().create_optimizer()
@@ -479,6 +484,25 @@ class AxolotlTrainer(SchedulerMixin, Trainer):
loraplus_lr_embedding=loraplus_lr_embedding,
**optimizer_kwargs,
)
elif self.args.alternate_optimizer == "soap":
from axolotl.utils.optimizers.soap import SOAP
optim_args = {
"lr": optimizer_kwargs.pop("lr"),
"eps": optimizer_kwargs.pop("eps"),
}
if self.cfg.optim_args:
optim_args.update(self.cfg.optim_args)
optim_args["betas"] = (
self.args.optim_soap_beta1,
self.args.optim_soap_beta2,
)
self.optimizer = SOAP( # pylint: disable=attribute-defined-outside-init
optimizer_grouped_parameters,
**optim_args,
)
elif self.args.alternate_optimizer == "optimi_adamw":
from optimi import AdamW
@@ -896,13 +920,13 @@ class AxolotlTrainer(SchedulerMixin, Trainer):
for key, value in metrics.items():
self._stored_metrics[train_eval][key].append(value)
def _save_checkpoint(self, model, trial, **kwargs):
def _save_checkpoint(self, model, trial, metrics=None):
# make sure the checkpoint dir exists, since trainer is flakey
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
run_dir = self._get_output_dir(trial=trial)
output_dir = os.path.join(run_dir, checkpoint_folder)
os.makedirs(output_dir, exist_ok=True)
return super()._save_checkpoint(model, trial, **kwargs)
return super()._save_checkpoint(model, trial, metrics=metrics)
class AxolotlMambaTrainer(AxolotlTrainer):
@@ -1148,12 +1172,6 @@ class TrainerBuilderBase(abc.ABC):
def get_callbacks(self) -> List[TrainerCallback]:
callbacks = []
plugin_manager = PluginManager.get_instance()
callbacks.extend(
plugin_manager.add_callbacks_pre_trainer(cfg=self.cfg, model=self.model)
)
if self.cfg.use_wandb:
callbacks.append(
SaveAxolotlConfigtoWandBCallback(self.cfg.axolotl_config_path)
@@ -1180,17 +1198,11 @@ class TrainerBuilderBase(abc.ABC):
return callbacks
@abstractmethod
def get_post_trainer_create_callbacks(self, trainer):
"""
Callbacks added after the trainer is created, usually b/c these need access to the trainer
"""
callbacks = []
plugin_manager = PluginManager.get_instance()
callbacks.extend(
plugin_manager.add_callbacks_post_trainer(cfg=self.cfg, trainer=trainer)
)
return callbacks
def hook_pre_create_training_args(self, training_arguments_kwargs):
# TODO
@@ -1236,7 +1248,7 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
return callbacks
def get_post_trainer_create_callbacks(self, trainer):
callbacks = super().get_post_trainer_create_callbacks(trainer=trainer)
callbacks = []
if self.cfg.use_wandb and self.cfg.eval_table_size > 0:
LogPredictionCallback = log_prediction_callback_factory(
trainer, self.tokenizer, "wandb"
@@ -1626,10 +1638,12 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
trainer_kwargs["max_length"] = self.cfg.sequence_len
if self.cfg.optimizer in [
# pylint: disable=duplicate-code
"optimi_adamw",
"ao_adamw_4bit",
"ao_adamw_8bit",
"ao_adamw_fp8",
"soap",
]:
# Set default so transformers doesn't throw
training_arguments_kwargs["optim"] = "adamw_hf"
@@ -1804,7 +1818,7 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
return callbacks
def get_post_trainer_create_callbacks(self, trainer):
callbacks = super().get_post_trainer_create_callbacks(trainer=trainer)
callbacks = []
return callbacks
def build_training_arguments(self, total_num_steps):
@@ -2013,11 +2027,11 @@ class HFPPOTrainerBuilder(TrainerBuilderBase):
"""
def get_callbacks(self):
callbacks = super().get_callbacks()
callbacks = []
return callbacks
def get_post_trainer_create_callbacks(self, trainer):
callbacks = super().get_post_trainer_create_callbacks(trainer=trainer)
callbacks = []
return callbacks
def build(self, total_num_steps):
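
In outline, the new branch builds SOAP from the already-grouped parameters and reuses the learning rate and epsilon resolved for the Adam path, which is what commit 67b9e31bbc addresses. A condensed, hypothetical restatement of that branch (names follow the diff above; this is a summary sketch, not the exact trainer code):

```python
# Condensed sketch of the SOAP branch in AxolotlTrainer.create_optimizer.
from axolotl.utils.optimizers.soap import SOAP

def build_soap_optimizer(optimizer_grouped_parameters, optimizer_kwargs, cfg, args):
    optim_args = {
        "lr": optimizer_kwargs.pop("lr"),    # reuse the lr resolved for Adam
        "eps": optimizer_kwargs.pop("eps"),  # reuse Adam's eps
    }
    if cfg.optim_args:                       # user-supplied overrides, if any
        optim_args.update(cfg.optim_args)
    optim_args["betas"] = (args.optim_soap_beta1, args.optim_soap_beta2)
    return SOAP(optimizer_grouped_parameters, **optim_args)
```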

View File

@@ -18,10 +18,9 @@ Plugins can be used to integrate third-party models, modify the training process
To create a new plugin, you need to inherit from the BasePlugin class and implement the required methods.
"""
import collections
import importlib
import logging
from typing import OrderedDict
from typing import List
class BasePlugin:
@@ -48,7 +47,7 @@ class BasePlugin:
Initializes the BasePlugin.
"""
def register(self, cfg): # pylint: disable=unused-argument
def register(self, cfg):
"""
Registers the plugin with the given configuration.
@@ -64,7 +63,7 @@ class BasePlugin:
Returns a pydantic model for the plugin's input arguments.
"""
def pre_model_load(self, cfg): # pylint: disable=unused-argument
def pre_model_load(self, cfg):
"""
Performs actions before the model is loaded.
@@ -75,7 +74,7 @@ class BasePlugin:
None
"""
def post_model_load(self, cfg, model): # pylint: disable=unused-argument
def post_model_load(self, cfg, model):
"""
Performs actions after the model is loaded.
@@ -87,7 +86,7 @@ class BasePlugin:
None
"""
def pre_lora_load(self, cfg, model): # pylint: disable=unused-argument
def pre_lora_load(self, cfg, model):
"""
Performs actions before LoRA weights are loaded.
@@ -99,7 +98,7 @@ class BasePlugin:
None
"""
def post_lora_load(self, cfg, model): # pylint: disable=unused-argument
def post_lora_load(self, cfg, model):
"""
Performs actions after LoRA weights are loaded.
@@ -111,7 +110,7 @@ class BasePlugin:
None
"""
def create_optimizer(self, cfg, trainer): # pylint: disable=unused-argument
def create_optimizer(self, cfg, trainer):
"""
Creates and returns an optimizer for training.
@@ -123,9 +122,7 @@ class BasePlugin:
object: The created optimizer.
"""
def create_lr_scheduler(
self, cfg, trainer, optimizer
): # pylint: disable=unused-argument
def create_lr_scheduler(self, cfg, trainer, optimizer):
"""
Creates and returns a learning rate scheduler.
@@ -138,7 +135,7 @@ class BasePlugin:
object: The created learning rate scheduler.
"""
def add_callbacks_pre_trainer(self, cfg, model): # pylint: disable=unused-argument
def add_callbacks_pre_trainer(self, cfg, model):
"""
Adds callbacks to the trainer before training.
@@ -149,11 +146,8 @@ class BasePlugin:
Returns:
List[callable]: A list of callback functions to be added to the TrainingArgs
"""
return []
def add_callbacks_post_trainer(
self, cfg, trainer
): # pylint: disable=unused-argument
def add_callbacks_post_trainer(self, cfg, trainer):
"""
Adds callbacks to the trainer after training.
@@ -164,9 +158,8 @@ class BasePlugin:
Returns:
List[callable]: A list of callback functions to be added to the TrainingArgs
"""
return []
def post_train(self, cfg, model): # pylint: disable=unused-argument
def post_train(self, cfg, model):
"""
Performs actions after training is complete.
@@ -178,7 +171,7 @@ class BasePlugin:
None
"""
def post_train_unload(self, cfg): # pylint: disable=unused-argument
def post_train_unload(self, cfg):
"""
Performs actions after training is complete and the model is unloaded.
@@ -234,7 +227,7 @@ class PluginManager:
pre_model_load(cfg): Calls the pre_model_load method of all registered plugins.
"""
plugins: OrderedDict[str, BasePlugin] = collections.OrderedDict()
plugins: List[BasePlugin] = []
_instance = None
@@ -244,7 +237,7 @@ class PluginManager:
"""
if cls._instance is None:
cls._instance = super(PluginManager, cls).__new__(cls)
cls._instance.plugins = collections.OrderedDict()
cls._instance.plugins: List[BasePlugin] = []
return cls._instance
@staticmethod
@@ -272,7 +265,7 @@ class PluginManager:
"""
try:
plugin = load_plugin(plugin_name)
self.plugins[plugin_name] = plugin
self.plugins.append(plugin)
except ImportError:
logging.error(f"Failed to load plugin: {plugin_name}")
@@ -284,7 +277,7 @@ class PluginManager:
list[str]: A list of Pydantic classes for all registered plugins' input arguments.
"""
input_args = []
for plugin in self.plugins.values():
for plugin in self.plugins:
input_args_from_plugin = plugin.get_input_args()
if input_args_from_plugin is not None:
input_args.append(input_args_from_plugin)
@@ -300,7 +293,7 @@ class PluginManager:
Returns:
None
"""
for plugin in self.plugins.values():
for plugin in self.plugins:
plugin.pre_model_load(cfg)
def post_model_load(self, cfg, model):
@@ -314,7 +307,7 @@ class PluginManager:
Returns:
None
"""
for plugin in self.plugins.values():
for plugin in self.plugins:
plugin.post_model_load(cfg, model)
def pre_lora_load(self, cfg, model):
@@ -328,7 +321,7 @@ class PluginManager:
Returns:
None
"""
for plugin in self.plugins.values():
for plugin in self.plugins:
plugin.pre_lora_load(cfg, model)
def post_lora_load(self, cfg, model):
@@ -342,7 +335,7 @@ class PluginManager:
Returns:
None
"""
for plugin in self.plugins.values():
for plugin in self.plugins:
plugin.post_lora_load(cfg, model)
def create_optimizer(self, cfg, trainer):
@@ -356,7 +349,7 @@ class PluginManager:
Returns:
object: The created optimizer, or None if none was found.
"""
for plugin in self.plugins.values():
for plugin in self.plugins:
optimizer = plugin.create_optimizer(cfg, trainer)
if optimizer is not None:
return optimizer
@@ -374,7 +367,7 @@ class PluginManager:
Returns:
object: The created learning rate scheduler, or None if none was found.
"""
for plugin in self.plugins.values():
for plugin in self.plugins:
scheduler = plugin.create_lr_scheduler(cfg, trainer, optimizer)
if scheduler is not None:
return scheduler
@@ -392,7 +385,7 @@ class PluginManager:
List[callable]: A list of callback functions to be added to the TrainingArgs.
"""
callbacks = []
for plugin in self.plugins.values():
for plugin in self.plugins:
callbacks.extend(plugin.add_callbacks_pre_trainer(cfg, model))
return callbacks
@@ -408,7 +401,7 @@ class PluginManager:
List[callable]: A list of callback functions to be added to the TrainingArgs.
"""
callbacks = []
for plugin in self.plugins.values():
for plugin in self.plugins:
callbacks.extend(plugin.add_callbacks_post_trainer(cfg, trainer))
return callbacks
@@ -423,5 +416,5 @@ class PluginManager:
Returns:
None
"""
for plugin in self.plugins.values():
for plugin in self.plugins:
plugin.post_train_unload(cfg)
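
For context, the list-based manager is consumed roughly like this (a sketch: `get_instance` and the hook names appear in this diff, while the `register` method name is assumed from the `load_plugin`/`append` snippet above):

```python
# Hypothetical usage sketch of the list-based PluginManager.
from axolotl.integrations.base import PluginManager
from axolotl.utils.dict import DictDefault

cfg = DictDefault({"model_config_type": "llama", "liger_rope": True})

manager = PluginManager.get_instance()  # singleton enforced in __new__
manager.register("axolotl.integrations.liger.LigerPlugin")  # appends to the list

# every hook now iterates the plain list, preserving registration order
manager.pre_model_load(cfg)
```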

View File

@@ -18,23 +18,20 @@ Module for the Plugin for LIGER integration with Axolotl.
Liger Kernel is the collection of Triton-native kernels for LLM Training.
It is designed to be performant, correct, and light-weight.
"""
import inspect
import logging
import sys
from functools import partial
from liger_kernel.transformers.cross_entropy import LigerCrossEntropyLoss
from liger_kernel.transformers.monkey_patch import MODEL_TYPE_TO_APPLY_LIGER_FN
from liger_kernel.transformers.geglu import LigerGEGLUMLP
from liger_kernel.transformers.rms_norm import LigerRMSNorm
from liger_kernel.transformers.rope import liger_rotary_pos_emb
from liger_kernel.transformers.swiglu import LigerSwiGLUMLP
from axolotl.integrations.base import BasePlugin
from ...utils.distributed import zero_only
from .args import LigerArgs  # pylint: disable=unused-import  # noqa: F401
LOG = logging.getLogger("axolotl.integrations.liger")
class LigerPlugin(BasePlugin):
"""
@@ -45,31 +42,59 @@ class LigerPlugin(BasePlugin):
return "axolotl.integrations.liger.LigerArgs"
def pre_model_load(self, cfg):
if cfg.model_config_type in MODEL_TYPE_TO_APPLY_LIGER_FN:
apply_liger_fn = MODEL_TYPE_TO_APPLY_LIGER_FN[cfg.model_config_type]
liger_fn_sig = inspect.signature(apply_liger_fn)
kwargs = {}
if "rope" in liger_fn_sig.parameters:
kwargs["rope"] = cfg.liger_rope
if "cross_entropy" in liger_fn_sig.parameters:
kwargs["cross_entropy"] = cfg.liger_cross_entropy
if "fused_linear_cross_entropy" in liger_fn_sig.parameters:
kwargs[
"fused_linear_cross_entropy"
] = cfg.liger_fused_linear_cross_entropy
if "rms_norm" in liger_fn_sig.parameters:
kwargs["rms_norm"] = cfg.liger_rms_norm
if "layer_norm" in liger_fn_sig.parameters:
kwargs["layer_norm"] = cfg.liger_layer_norm
if "geglu" in liger_fn_sig.parameters:
kwargs["geglu"] = cfg.liger_glu_activation
elif "swiglu" in liger_fn_sig.parameters:
kwargs["swiglu"] = cfg.liger_glu_activation
with zero_only():
LOG.info(
f"Applying LIGER to {cfg.model_config_type} with kwargs: {kwargs}"
if cfg.model_config_type == "llama":
from liger_kernel.transformers.model.llama import (
lce_forward as llama_lce_forward,
)
from transformers.models.llama import modeling_llama
if cfg.liger_rope:
modeling_llama.apply_rotary_pos_emb = liger_rotary_pos_emb
if cfg.liger_rms_norm:
modeling_llama.LlamaRMSNorm = LigerRMSNorm
if cfg.liger_swiglu:
modeling_llama.LlamaMLP = LigerSwiGLUMLP
if cfg.liger_cross_entropy:
modeling_llama.CrossEntropyLoss = LigerCrossEntropyLoss
elif cfg.liger_fused_linear_cross_entropy:
modeling_llama.LlamaForCausalLM.forward = llama_lce_forward
elif cfg.model_config_type == "mistral":
from liger_kernel.transformers.model.mistral import (
lce_forward as mistral_lce_forward,
)
from transformers.models.mistral import modeling_mistral
if cfg.liger_rope:
modeling_mistral.apply_rotary_pos_emb = liger_rotary_pos_emb
if cfg.liger_rms_norm:
modeling_mistral.MistralRMSNorm = LigerRMSNorm
if cfg.liger_swiglu:
modeling_mistral.MistralMLP = LigerSwiGLUMLP
if cfg.liger_cross_entropy:
modeling_mistral.CrossEntropyLoss = LigerCrossEntropyLoss
if cfg.liger_fused_linear_cross_entropy:
modeling_mistral.MistralForCausalLM.forward = mistral_lce_forward
elif cfg.model_config_type == "gemma":
from liger_kernel.transformers.model.gemma import (
lce_forward as gemma_lce_forward,
)
from transformers.models.gemma import modeling_gemma
if cfg.liger_rope:
modeling_gemma.apply_rotary_pos_emb = liger_rotary_pos_emb
if cfg.liger_rms_norm:
modeling_gemma.GemmaRMSNorm = partial(
LigerRMSNorm, offset=1.0, init_fn="zeros", casting_mode="gemma"
)
apply_liger_fn(**kwargs)
if cfg.liger_swiglu:
modeling_gemma.GemmaMLP = LigerGEGLUMLP
if cfg.liger_cross_entropy:
modeling_gemma.CrossEntropyLoss = LigerCrossEntropyLoss
if cfg.liger_fused_linear_cross_entropy:
modeling_gemma.GemmaForCausalLM.forward = gemma_lce_forward
elif cfg.model_config_type == "jamba":
from transformers.models.jamba import modeling_jamba
@@ -79,12 +104,30 @@ class LigerPlugin(BasePlugin):
modeling_jamba.apply_rotary_pos_emb = liger_rotary_pos_emb
if cfg.liger_rms_norm:
modeling_jamba.JambaRMSNorm = LigerRMSNorm
if cfg.liger_glu_activation:
if cfg.liger_swiglu:
modeling_jamba.JambaMLP = LigerSwiGLUMLP
if cfg.liger_cross_entropy:
modeling_jamba.CrossEntropyLoss = LigerCrossEntropyLoss
if cfg.liger_fused_linear_cross_entropy:
modeling_jamba.JambaForCausalLM.forward = jamba_lce_forward
elif cfg.model_config_type == "qwen2":
from liger_kernel.transformers.model.qwen2 import (
lce_forward as qwen2_lce_forward,
)
from transformers.models.qwen2 import modeling_qwen2
if cfg.liger_rope:
modeling_qwen2.apply_rotary_pos_emb = liger_rotary_pos_emb
if cfg.liger_rms_norm:
modeling_qwen2.Qwen2RMSNorm = LigerRMSNorm
if cfg.liger_swiglu:
modeling_qwen2.Qwen2MLP = LigerSwiGLUMLP
if cfg.liger_cross_entropy:
modeling_qwen2.CrossEntropyLoss = LigerCrossEntropyLoss
if cfg.liger_fused_linear_cross_entropy:
modeling_qwen2.Qwen2ForCausalLM.forward = qwen2_lce_forward
elif cfg.model_config_type == "deepseek_v2":
from accelerate import init_empty_weights
from transformers import AutoModelForCausalLM
@@ -103,9 +146,44 @@ class LigerPlugin(BasePlugin):
logging.warning("Fused liger_rope is not supported for DeepseekV2.")
if cfg.liger_rms_norm:
modeling_mod.DeepseekV2RMSNorm = LigerRMSNorm
if cfg.liger_glu_activation:
if cfg.liger_swiglu:
modeling_mod.DeepseekV2MLP.forward = LigerSwiGLUMLP.forward
if cfg.liger_cross_entropy:
modeling_mod.CrossEntropyLoss = LigerCrossEntropyLoss
if cfg.liger_fused_linear_cross_entropy:
modeling_mod.DeepseekV2ForCausalLM.forward = deepseekv2_lce_forward
elif cfg.model_config_type == "gemma2":
from transformers.models.gemma2 import modeling_gemma2
if cfg.liger_rope:
modeling_gemma2.apply_rotary_pos_emb = liger_rotary_pos_emb
if cfg.liger_rms_norm:
modeling_gemma2.Gemma2RMSNorm = partial(
LigerRMSNorm, offset=1.0, init_fn="zeros", casting_mode="gemma"
)
if cfg.liger_swiglu:
modeling_gemma2.Gemma2MLP = LigerGEGLUMLP
if cfg.liger_cross_entropy:
modeling_gemma2.CrossEntropyLoss = LigerCrossEntropyLoss
if cfg.liger_fused_linear_cross_entropy:
logging.warning(
"Fused linear cross entropy is not supported for Gemma 2."
)
elif cfg.model_config_type == "phi3":
from liger_kernel.transformers.model.phi3 import (
lce_forward as phi3_lce_forward,
)
from transformers.models.phi3 import modeling_phi3
if cfg.liger_rope:
modeling_phi3.apply_rotary_pos_emb = liger_rotary_pos_emb
if cfg.liger_rms_norm:
modeling_phi3.Phi3RMSNorm = LigerRMSNorm
if cfg.liger_swiglu:
modeling_phi3.Phi3MLP = LigerSwiGLUMLP
if cfg.liger_cross_entropy:
modeling_phi3.CrossEntropyLoss = LigerCrossEntropyLoss
if cfg.liger_fused_linear_cross_entropy:
modeling_phi3.Phi3ForCausalLM.forward = phi3_lce_forward
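
The removed side of this hunk dispatched through `MODEL_TYPE_TO_APPLY_LIGER_FN` and used `inspect.signature` to forward only the kwargs each apply-function accepts. As a generic illustration of that pattern (a standalone example, not axolotl code):

```python
import inspect

def call_with_supported_kwargs(fn, **candidate_kwargs):
    """Forward only the keyword arguments that fn actually declares."""
    supported = inspect.signature(fn).parameters
    return fn(**{k: v for k, v in candidate_kwargs.items() if k in supported})

def apply_liger(rope=False, rms_norm=False):  # stand-in for an apply_liger_* fn
    return {"rope": rope, "rms_norm": rms_norm}

# 'swiglu' is silently dropped because apply_liger does not declare it
print(call_with_supported_kwargs(apply_liger, rope=True, swiglu=True))
```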

View File

@@ -15,12 +15,9 @@
"""
Module for handling LIGER input arguments.
"""
import logging
from typing import Optional
from pydantic import BaseModel, model_validator
LOG = logging.getLogger("axolotl.integrations.liger.args")
from pydantic import BaseModel
class LigerArgs(BaseModel):
@@ -30,24 +27,6 @@ class LigerArgs(BaseModel):
liger_rope: Optional[bool] = None
liger_rms_norm: Optional[bool] = None
liger_layer_norm: Optional[bool] = None
liger_swiglu: Optional[bool] = None
liger_glu_activation: Optional[bool] = None
liger_cross_entropy: Optional[bool] = None
liger_fused_linear_cross_entropy: Optional[bool] = None
@model_validator(mode="before")
@classmethod
def check_deprecated_swiglu(cls, data):
if data.get("liger_swiglu") is not None:
if data.get("liger_glu_activation") is not None:
raise ValueError(
"You cannot have both `liger_swiglu` and `liger_glu_activation` set."
)
LOG.warning(
"The 'liger_swiglu' argument is deprecated and will be removed in a future release. "
"Please use 'liger_glu_activation' instead."
)
data["liger_glu_activation"] = data.pop("liger_swiglu")
return data

View File

@@ -427,6 +427,7 @@ class HyperparametersConfig(BaseModel):
"ao_adamw_4bit",
"ao_adamw_8bit",
"ao_adamw_fp8",
"soap",
],
]
] = OptimizerNames.ADAMW_HF.value
@@ -439,6 +440,10 @@ class HyperparametersConfig(BaseModel):
"help": "The target modules to optimize, i.e. the module names that you would like to train."
},
)
optim_soap_beta1: Optional[float] = None
optim_soap_beta2: Optional[float] = None
torchdistx_path: Optional[str] = None
lr_scheduler: Optional[Union[SchedulerType, Literal["one_cycle"]]] = "cosine"
lr_scheduler_kwargs: Optional[Dict[str, Any]] = None
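
Putting the schema together, a user config would enable the optimizer like this (a minimal sketch: `optimizer: soap` and the two beta keys come from this compare's schema change and smoke test; the remaining keys are ordinary axolotl settings shown for context):

```yaml
optimizer: soap
learning_rate: 0.00001
# Adam-style betas consumed by SOAP via optim_soap_beta1/optim_soap_beta2
optim_soap_beta1: 0.95
optim_soap_beta2: 0.95
lr_scheduler: cosine
```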

View File

@@ -2,11 +2,9 @@
import functools
import logging
import time
from pathlib import Path
from typing import List, Optional, Tuple, Union
import requests
from datasets import (
Dataset,
DatasetDict,
@@ -55,28 +53,6 @@ from axolotl.utils.trainer import (
LOG = logging.getLogger("axolotl")
def retry_on_request_exceptions(max_retries=3, delay=1):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs): # pylint: disable=inconsistent-return-statements
for attempt in range(max_retries):
try:
return func(*args, **kwargs)
except (
requests.exceptions.ReadTimeout,
requests.exceptions.ConnectionError,
) as exc:
if attempt < max_retries - 1:
time.sleep(delay)
else:
raise exc
return wrapper
return decorator
@retry_on_request_exceptions(max_retries=3, delay=5)
def prepare_dataset(cfg, tokenizer, processor=None):
prompters = []
if not cfg.pretraining_dataset:

View File

@@ -1,250 +0,0 @@
from typing import Optional
import torch
from torch import Tensor
from torch.distributed._tensor import DTensor
from torch.optim import Optimizer
from torchao.prototype.low_bit_optim.subclass_4bit import OptimState4bit
from torchao.prototype.low_bit_optim.subclass_8bit import OptimState8bit
from torchao.prototype.low_bit_optim.subclass_fp8 import OptimStateFp8
class _ShampooBase(Optimizer):
def __init__(
self,
params,
lr=1e-1,
momentum=0.0,
weight_decay=0.0,
eps=1e-4,
update_freq=1,
*,
block_size,
quantization_bits,
optimizer_state_class,
):
if lr <= 0.0:
raise ValueError(f"Invalid learning rate: {lr}")
if momentum < 0.0:
raise ValueError(f"Invalid momentum value: {momentum}")
if weight_decay < 0.0:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
if eps < 0.0:
raise ValueError(f"Invalid eps value: {eps}")
if update_freq < 1:
raise ValueError(f"Invalid update_freq value: {update_freq}")
defaults = dict(
lr=lr,
momentum=momentum,
weight_decay=weight_decay,
eps=eps,
update_freq=update_freq,
)
super().__init__(params, defaults)
self.block_size = block_size
self.quantization_bits = quantization_bits
self.optimizer_state_class = optimizer_state_class
def step(self, closure: Optional[callable] = None) -> Optional[float]:
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
state["momentum_buffer"] = self._new_buffer(grad, True)
state["preconds"] = []
state["inv_preconds"] = []
for dim in grad.size():
state["preconds"].append(
self.optimizer_state_class.zeros(
(dim, dim),
signed=False,
block_size=self.block_size,
device=grad.device,
)
)
state["inv_preconds"].append(
torch.zeros((dim, dim), device=grad.device)
)
state["step"] += 1
beta = group["momentum"]
weight_decay = group["weight_decay"]
lr = group["lr"]
eps = group["eps"]
update_freq = group["update_freq"]
# Apply momentum
if beta > 0:
state["momentum_buffer"].mul_(beta).add_(grad, alpha=1 - beta)
grad = state["momentum_buffer"]
# Apply weight decay
if weight_decay > 0:
grad = grad.add(p.data, alpha=weight_decay)
# Preconditioning
order = grad.ndimension()
original_size = grad.size()
for dim_id, dim in enumerate(grad.size()):
precond = state["preconds"][dim_id]
inv_precond = state["inv_preconds"][dim_id]
# Reshape grad
grad = grad.transpose(0, dim_id).contiguous()
transposed_size = grad.size()
grad = grad.view(dim, -1)
grad_t = grad.t()
# Update preconditioner
precond_fp32 = precond.dequantize()
precond_update = grad @ grad_t
precond_fp32.add_(precond_update)
# Quantize preconditioner back
precond.copy_(precond_fp32)
# Update inverse preconditioner
if state["step"] % update_freq == 0:
inv_precond.copy_(
self._compute_inv_precond(precond_fp32, eps, order)
)
# Precondition grad
if dim_id == order - 1:
# Last dimension
grad = grad_t @ inv_precond
grad = grad.view(original_size)
else:
grad = inv_precond @ grad
grad = grad.view(transposed_size)
# Update parameter
p.data.add_(grad, alpha=-lr)
return loss
def _compute_inv_precond(self, precond: Tensor, eps: float, order: int):
# Add eps for numerical stability
precond = precond + torch.eye(precond.size(0), device=precond.device) * eps
# Compute matrix power
inv_precond = self._matrix_power(precond, -1.0 / (2 * order))
return inv_precond
def _matrix_power(self, matrix: Tensor, power: float) -> Tensor:
# Compute matrix power using SVD
u, s, v = torch.svd(matrix)
s_pow = s.pow(power)
return u @ torch.diag(s_pow) @ v.t()
# bring your own function to create zero-filled subclass
@staticmethod
def _subclass_zeros(p: Tensor, signed: bool, block_size: int):
raise NotImplementedError
# follow bitsandbytes, only quantize tensors >= 4096 values
# also wrap subclass in DTensor when needed
def _new_buffer(self, p: Tensor, signed: bool):
if p.numel() >= 4096 and p.numel() % self.block_size == 0:
if isinstance(p, DTensor):
out = DTensor.from_local(
local_tensor=self._subclass_zeros(
p.to_local(), signed, self.block_size
),
device_mesh=p.device_mesh,
placements=p.placements,
run_check=False,
)
else:
out = self._subclass_zeros(p, signed, self.block_size)
else:
out = torch.zeros_like(p)
return out
class Shampoo8bit(_ShampooBase):
def __init__(
self,
params,
lr=1e-1,
momentum=0.0,
weight_decay=0.0,
eps=1e-4,
update_freq=1,
*,
block_size=256,
):
super().__init__(
params,
lr,
momentum,
weight_decay,
eps,
update_freq,
block_size=block_size,
quantization_bits=8,
optimizer_state_class=OptimState8bit,
)
class Shampoo4bit(_ShampooBase):
def __init__(
self,
params,
lr=1e-1,
momentum=0.0,
weight_decay=0.0,
eps=1e-4,
update_freq=1,
*,
block_size=128,
):
super().__init__(
params,
lr,
momentum,
weight_decay,
eps,
update_freq,
block_size=block_size,
quantization_bits=4,
optimizer_state_class=OptimState4bit,
)
class ShampooFp8(_ShampooBase):
def __init__(
self,
params,
lr=1e-1,
momentum=0.0,
weight_decay=0.0,
eps=1e-4,
update_freq=1,
*,
block_size=256,
):
super().__init__(
params,
lr,
momentum,
weight_decay,
eps,
update_freq,
block_size=block_size,
quantization_bits=8, # FP8 uses 8 bits
optimizer_state_class=OptimStateFp8,
)
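
The deleted file implemented quantized Shampoo variants (8-bit, 4-bit, and FP8 optimizer state). For reference, the preconditioning performed in `step` and `_compute_inv_precond` above can be summarized as follows (our notation, not from the diff):

```latex
% For an order-k gradient tensor G with mode-i unfolding G_(i):
\[
  H_i \leftarrow H_i + G_{(i)} G_{(i)}^{\top}, \qquad
  \tilde{G} = G \times_1 (H_1 + \epsilon I)^{-\frac{1}{2k}} \cdots \times_k (H_k + \epsilon I)^{-\frac{1}{2k}},
\]
% with the inverse roots recomputed only every update_freq steps via SVD.
```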

View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2024 Nikhil Vyas
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,475 @@
# pylint: skip-file
# Copied from https://github.com/nikhilvyas/SOAP
from itertools import chain
import torch
import torch.optim as optim
# Parts of the code are modifications of Pytorch's AdamW optimizer
# Parts of the code are modifications of code from https://github.com/jiaweizzhao/GaLore/blob/master/galore_torch/galore_projector.py
class SOAP(optim.Optimizer):
"""
Implements SOAP algorithm (https://arxiv.org/abs/2409.11321).
Parameters:
params (`Iterable[nn.parameter.Parameter]`):
Iterable of parameters to optimize or dictionaries defining parameter groups.
lr (`float`, *optional*, defaults to 0.003):
The learning rate to use.
betas (`Tuple[float,float]`, *optional*, defaults to `(0.95, 0.95)`):
Adam's betas parameters (b1, b2).
shampoo_beta (`float`, *optional*, defaults to -1):
If >= 0, use this beta for the preconditioner (L and R in paper, state['GG'] below) moving average instead of betas[1].
eps (`float`, *optional*, defaults to 1e-08):
Adam's epsilon for numerical stability.
weight_decay (`float`, *optional*, defaults to 0.01): weight decay coefficient.
precondition_frequency (`int`, *optional*, defaults to 10):
How often to update the preconditioner.
max_precond_dim (`int`, *optional*, defaults to 10000):
Maximum dimension of the preconditioner.
Set to 10000, so that we exclude most common vocab sizes while including layers.
merge_dims (`bool`, *optional*, defaults to `False`):
Whether or not to merge dimensions of the preconditioner.
precondition_1d (`bool`, *optional*, defaults to `False`):
Whether or not to precondition 1D gradients.
normalize_grads (`bool`, *optional*, defaults to `False`):
Whether or not to normalize gradients per layer.
Helps at large precondition_frequency (~100 in our experiments),
but hurts performance at small precondition_frequency (~10 in our experiments).
data_format (`str`, *optional*, defaults to `channels_first`):
Data format of the input for convolutional layers.
Should be "channels_last" for data_format of NHWC and "channels_first" for NCHW.
correct_bias (`bool`, *optional*, defaults to `True`):
Whether or not to use bias correction in Adam.
"""
def __init__(
self,
params,
lr: float = 3e-3,
betas=(0.95, 0.95),
shampoo_beta: float = -1,
eps: float = 1e-8,
weight_decay: float = 0.01,
precondition_frequency: int = 10,
max_precond_dim: int = 10000,
merge_dims: bool = False, # Merge dimensions till the product of the dimensions is less than or equal to max_precond_dim.
precondition_1d: bool = False,
normalize_grads: bool = False,
data_format: str = "channels_first",
correct_bias: bool = True,
):
defaults = {
"lr": lr,
"betas": betas,
"shampoo_beta": shampoo_beta,
"eps": eps,
"weight_decay": weight_decay,
"precondition_frequency": precondition_frequency,
"max_precond_dim": max_precond_dim,
"merge_dims": merge_dims,
"precondition_1d": precondition_1d,
"normalize_grads": normalize_grads,
"correct_bias": correct_bias,
}
super().__init__(params, defaults)
self._data_format = data_format
def merge_dims(self, grad, max_precond_dim):
"""
Merges dimensions of the gradient tensor till the product of the dimensions is less than or equal to max_precond_dim.
"""
assert self._data_format in ["channels_first", "channels_last"]
if self._data_format == "channels_last" and grad.dim() == 4:
grad = grad.permute(0, 3, 1, 2)
shape = grad.shape
new_shape = []
curr_shape = 1
for sh in shape:
temp_shape = curr_shape * sh
if temp_shape > max_precond_dim:
if curr_shape > 1:
new_shape.append(curr_shape)
curr_shape = sh
else:
new_shape.append(sh)
curr_shape = 1
else:
curr_shape = temp_shape
if curr_shape > 1 or len(new_shape) == 0:
new_shape.append(curr_shape)
new_grad = grad.reshape(new_shape)
return new_grad
@torch.no_grad()
def step(self):
"""
Performs a single optimization step.
"""
loss = None
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad
state = self.state[p]
if "step" not in state:
state["step"] = 0
# State initialization
if "exp_avg" not in state:
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(grad)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(grad)
if "Q" not in state:
self.init_preconditioner(
grad,
state,
precondition_frequency=group["precondition_frequency"],
precondition_1d=group["precondition_1d"],
shampoo_beta=(
group["shampoo_beta"]
if group["shampoo_beta"] >= 0
else group["betas"][1]
),
max_precond_dim=group["max_precond_dim"],
merge_dims=group["merge_dims"],
)
self.update_preconditioner(
grad,
state,
max_precond_dim=group["max_precond_dim"],
merge_dims=group["merge_dims"],
precondition_1d=group["precondition_1d"],
)
continue # first step is skipped so that we never use the current gradients in the projection.
# Projecting gradients to the eigenbases of Shampoo's preconditioner
# i.e. projecting to the eigenbases of matrices in state['GG']
grad_projected = self.project(
grad,
state,
merge_dims=group["merge_dims"],
max_precond_dim=group["max_precond_dim"],
)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1))
exp_avg_sq.mul_(beta2).add_(
grad_projected.square(), alpha=(1.0 - beta2)
)
denom = exp_avg_sq.sqrt().add_(group["eps"])
# Projecting the exponential moving average of gradients to the eigenbases of Shampoo's preconditioner
# i.e. projecting to the eigenbases of matrices in state['GG']
exp_avg_projected = self.project(
exp_avg,
state,
merge_dims=group["merge_dims"],
max_precond_dim=group["max_precond_dim"],
)
step_size = group["lr"]
if group["correct_bias"]:
bias_correction1 = 1.0 - beta1 ** (state["step"])
bias_correction2 = 1.0 - beta2 ** (state["step"])
step_size = step_size * (bias_correction2**0.5) / bias_correction1
# Projecting back the preconditioned (by Adam) exponential moving average of gradients
# to the original space
norm_grad = self.project_back(
exp_avg_projected / denom,
state,
merge_dims=group["merge_dims"],
max_precond_dim=group["max_precond_dim"],
)
if group["normalize_grads"]:
norm_grad = norm_grad / (1e-30 + torch.mean(norm_grad**2) ** 0.5)
p.add_(norm_grad, alpha=-step_size)
# From AdamW code: Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
# Add weight decay at the end (fixed version)
if group["weight_decay"] > 0.0:
p.add_(p, alpha=(-group["lr"] * group["weight_decay"]))
# Update is done after the gradient step to avoid using current gradients in the projection.
self.update_preconditioner(
grad,
state,
max_precond_dim=group["max_precond_dim"],
merge_dims=group["merge_dims"],
precondition_1d=group["precondition_1d"],
)
return loss
def init_preconditioner(
self,
grad,
state,
precondition_frequency=10,
shampoo_beta=0.95,
max_precond_dim=10000,
precondition_1d=False,
merge_dims=False,
):
"""
Initializes the preconditioner matrices (L and R in the paper).
"""
state[
"GG"
] = [] # Will hold all the preconditioner matrices (L and R in the paper).
if grad.dim() == 1:
if not precondition_1d or grad.shape[0] > max_precond_dim:
state["GG"].append([])
else:
state["GG"].append(
torch.zeros(grad.shape[0], grad.shape[0], device=grad.device)
)
else:
if merge_dims:
grad = self.merge_dims(grad, max_precond_dim)
for sh in grad.shape:
if sh > max_precond_dim:
state["GG"].append([])
else:
state["GG"].append(torch.zeros(sh, sh, device=grad.device))
state["Q"] = None # Will hold all the eigenbases of the preconditioner.
state["precondition_frequency"] = precondition_frequency
state["shampoo_beta"] = shampoo_beta
def project(self, grad, state, merge_dims=False, max_precond_dim=10000):
"""
Projects the gradient to the eigenbases of the preconditioner.
"""
original_shape = grad.shape
if merge_dims:
if grad.dim() == 4 and self._data_format == "channels_last":
permuted_shape = grad.permute(0, 3, 1, 2).shape
grad = self.merge_dims(grad, max_precond_dim)
for mat in state["Q"]:
if len(mat) > 0:
grad = torch.tensordot(
grad,
mat,
dims=[[0], [0]],
)
else:
permute_order = list(range(1, len(grad.shape))) + [0]
grad = grad.permute(permute_order)
if merge_dims:
if self._data_format == "channels_last" and len(original_shape) == 4:
grad = grad.reshape(permuted_shape).permute(0, 2, 3, 1)
else:
grad = grad.reshape(original_shape)
return grad
def update_preconditioner(
self,
grad,
state,
max_precond_dim=10000,
merge_dims=False,
precondition_1d=False,
):
"""
Updates the preconditioner matrices and the eigenbases (L, R, Q_L, Q_R in the paper).
"""
if grad.dim() == 1:
if precondition_1d and grad.shape[0] <= max_precond_dim:
state["GG"][0].lerp_(
grad.unsqueeze(1) @ grad.unsqueeze(0), 1 - state["shampoo_beta"]
)
else:
if merge_dims:
new_grad = self.merge_dims(grad, max_precond_dim)
for idx, sh in enumerate(new_grad.shape):
if sh <= max_precond_dim:
outer_product = torch.tensordot(
new_grad,
new_grad,
dims=[
[
*chain(
range(idx), range(idx + 1, len(new_grad.shape))
)
]
]
* 2,
)
state["GG"][idx].lerp_(outer_product, 1 - state["shampoo_beta"])
else:
for idx, sh in enumerate(grad.shape):
if sh <= max_precond_dim:
outer_product = torch.tensordot(
grad,
grad,
# Contracts across all dimensions except for k.
dims=[[*chain(range(idx), range(idx + 1, len(grad.shape)))]]
* 2,
)
state["GG"][idx].lerp_(outer_product, 1 - state["shampoo_beta"])
if state["Q"] is None:
state["Q"] = self.get_orthogonal_matrix(state["GG"])
if state["step"] > 0 and state["step"] % state["precondition_frequency"] == 0:
state["Q"] = self.get_orthogonal_matrix_QR(
state, max_precond_dim, merge_dims
)
def project_back(self, grad, state, merge_dims=False, max_precond_dim=10000):
"""
Projects the gradient back to the original space.
"""
original_shape = grad.shape
if merge_dims:
if self._data_format == "channels_last" and grad.dim() == 4:
permuted_shape = grad.permute(0, 3, 1, 2).shape
grad = self.merge_dims(grad, max_precond_dim)
for mat in state["Q"]:
if len(mat) > 0:
grad = torch.tensordot(
grad,
mat,
dims=[[0], [1]],
)
else:
permute_order = list(range(1, len(grad.shape))) + [0]
grad = grad.permute(permute_order)
if merge_dims:
if self._data_format == "channels_last" and len(original_shape) == 4:
grad = grad.reshape(permuted_shape).permute(0, 2, 3, 1)
else:
grad = grad.reshape(original_shape)
return grad
def get_orthogonal_matrix(self, mat):
"""
Computes the eigenbases of the preconditioner using torch.linalg.eigh decomposition.
"""
matrix = []
for m in mat:
if len(m) == 0:
matrix.append([])
continue
if m.data.dtype != torch.float:
float_data = False
original_type = m.data.dtype
original_device = m.data.device
matrix.append(m.data.float())
else:
float_data = True
matrix.append(m.data)
final = []
for m in matrix:
if len(m) == 0:
final.append([])
continue
try:
_, Q = torch.linalg.eigh(
m + 1e-30 * torch.eye(m.shape[0], device=m.device)
)
except: # pylint: disable=bare-except # noqa: E722
_, Q = torch.linalg.eigh(
m.to(torch.float64) + 1e-30 * torch.eye(m.shape[0], device=m.device)
)
Q = Q.to(m.dtype)
Q = torch.flip(Q, [1])
if not float_data:
Q = Q.to(original_device).type(original_type)
final.append(Q)
return final
def get_orthogonal_matrix_QR(self, state, max_precond_dim=10000, merge_dims=False):
"""
Computes the eigenbases of the preconditioner using one round of power iteration
followed by torch.linalg.qr decomposition.
"""
precond_list = state["GG"]
orth_list = state["Q"]
matrix = []
orth_matrix = []
for m, o in zip(precond_list, orth_list):
if len(m) == 0:
matrix.append([])
orth_matrix.append([])
continue
if m.data.dtype != torch.float:
float_data = False
original_type = m.data.dtype
original_device = m.data.device
matrix.append(m.data.float())
orth_matrix.append(o.data.float())
else:
float_data = True
matrix.append(m.data.float())
orth_matrix.append(o.data.float())
orig_shape = state["exp_avg_sq"].shape
if self._data_format == "channels_last" and len(orig_shape) == 4:
permuted_shape = state["exp_avg_sq"].permute(0, 3, 1, 2).shape
if merge_dims:
exp_avg_sq = self.merge_dims(state["exp_avg_sq"], max_precond_dim)
else:
exp_avg_sq = state["exp_avg_sq"]
final = []
for ind, (m, o) in enumerate(zip(matrix, orth_matrix)):
if len(m) == 0:
final.append([])
continue
est_eig = torch.diag(o.T @ m @ o)
sort_idx = torch.argsort(est_eig, descending=True)
exp_avg_sq = exp_avg_sq.index_select(ind, sort_idx)
o = o[:, sort_idx]
power_iter = m @ o
Q, _ = torch.linalg.qr(power_iter)
if not float_data:
Q = Q.to(original_device).type(original_type)
final.append(Q)
if merge_dims:
if self._data_format == "channels_last" and len(orig_shape) == 4:
exp_avg_sq = exp_avg_sq.reshape(permuted_shape).permute(0, 2, 3, 1)
else:
exp_avg_sq = exp_avg_sq.reshape(orig_shape)
state["exp_avg_sq"] = exp_avg_sq
return final
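
To make the pieces above concrete, a minimal usage sketch (hypothetical; note that the first `step()` only initializes and updates the preconditioner and skips the parameter update, per the `continue` in `step`):

```python
import torch
from axolotl.utils.optimizers.soap import SOAP  # module path per the trainer diff above

model = torch.nn.Linear(64, 32)
opt = SOAP(model.parameters(), lr=3e-3, betas=(0.95, 0.95), weight_decay=0.01)

for _ in range(3):
    loss = model(torch.randn(8, 64)).pow(2).mean()
    loss.backward()
    opt.step()      # step 1 builds state["GG"]/state["Q"]; later steps run Adam
                    # in the preconditioner's eigenbasis and project back
    opt.zero_grad()
```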

View File

@@ -1,6 +1,7 @@
"""
Simple end-to-end test for Liger integration
"""
import unittest
from pathlib import Path

View File

@@ -65,3 +65,44 @@ class TestCustomOptimizers(unittest.TestCase):
train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
assert (Path(temp_dir) / "adapter_model.bin").exists()
@with_temp_dir
def test_soap(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(
{
"base_model": "HuggingFaceTB/SmolLM-135M",
"sequence_len": 1024,
"load_in_8bit": True,
"adapter": "lora",
"lora_r": 8,
"lora_alpha": 16,
"lora_dropout": 0.05,
"lora_target_linear": True,
"val_set_size": 0.1,
"special_tokens": {
"pad_token": "<|endoftext|>",
},
"datasets": [
{
"path": "vicgalle/alpaca-gpt4",
"type": "alpaca",
},
],
"num_epochs": 1,
"micro_batch_size": 8,
"gradient_accumulation_steps": 1,
"output_dir": temp_dir,
"learning_rate": 0.00001,
"optimizer": "soap",
"optim_soap_beta1": 0.95,
"optim_soap_beta2": 0.95,
"lr_scheduler": "cosine",
}
)
normalize_config(cfg)
cli_args = TrainerCliArgs()
dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
assert (Path(temp_dir) / "adapter_model.bin").exists()

View File

@@ -1,80 +0,0 @@
"""
config validation tests for swiglu args
"""
# pylint: disable=duplicate-code
import logging
from typing import Optional
import pytest
from axolotl.utils.config import validate_config
from axolotl.utils.dict import DictDefault
@pytest.fixture(name="minimal_base_cfg")
def fixture_cfg():
return DictDefault(
{
"base_model": "TinyLlama/TinyLlama-1.1B-Chat-v0.6",
"learning_rate": 0.000001,
"datasets": [
{
"path": "mhenrichsen/alpaca_2k_test",
"type": "alpaca",
}
],
"micro_batch_size": 1,
"gradient_accumulation_steps": 1,
}
)
class BaseValidation:
"""
Base validation module to setup the log capture
"""
_caplog: Optional[pytest.LogCaptureFixture] = None
@pytest.fixture(autouse=True)
def inject_fixtures(self, caplog):
self._caplog = caplog
# pylint: disable=too-many-public-methods
class TestValidation(BaseValidation):
"""
Test the validation module for liger
"""
def test_deprecated_swiglu(self, minimal_cfg):
test_cfg = DictDefault(
{
"liger_swiglu": False,
}
| minimal_cfg
)
with self._caplog.at_level(logging.WARNING):
updated_cfg = validate_config(test_cfg)
assert (
"The 'liger_swiglu' argument is deprecated"
in self._caplog.records[0].message
)
assert updated_cfg.liger_swiglu is None
assert updated_cfg.liger_glu_activations is False
def test_conflict_swiglu_ligergluactivation(self, minimal_cfg):
test_cfg = DictDefault(
{
"liger_swiglu": False,
"liger_glu_activations": True,
}
| minimal_cfg
)
with pytest.raises(
ValueError,
match=r".*You cannot have both `liger_swiglu` and `liger_glu_activation` set.*",
):
validate_config(test_cfg)

View File

@@ -306,10 +306,6 @@ class TestDatasetPreparation(unittest.TestCase):
"""Verify that processing data from the hub works with a specific revision"""
with tempfile.TemporaryDirectory() as tmp_dir:
prepared_path = Path(tmp_dir) / "prepared"
# make sure prepared_path is empty
shutil.rmtree(prepared_path, ignore_errors=True)
cfg = DictDefault(
{
"tokenizer_config": "huggyllama/llama-7b",